/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
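
/* Illustrative usage sketch (editorial, not an actual callsite): a
 * 32-bit load with acquire semantics into r2 would record its syndrome
 * with
 *     disas_set_da_iss(s, MO_UL, 2 | ISSIsAcqRel);
 * and a store would additionally OR in ISSIsWrite, which is what the
 * gen_aa32_st..._iss wrappers further down do.
 */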

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}
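
/* Editorial note: this index is what translations of the "unprivileged"
 * LDRT/STRT-style instructions pass as the memory-index argument of the
 * gen_aa32_ld and gen_aa32_st helpers below, in place of the usual
 * get_mem_index(s).
 */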

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
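
/* Worked example for gen_rev16() (editorial illustration): with
 * var = 0xaabbccdd,
 *   tmp = (var >> 8) & 0x00ff00ff  = 0x00aa00cc
 *   var = (var & 0x00ff00ff) << 8  = 0xbb00dd00
 *   var | tmp                      = 0xbbaaddcc
 * i.e. the two bytes of each halfword are swapped, as REV16 requires.
 */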

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
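
/* Worked example (editorial): adding the low lanes 0x8002 + 0x8003 must
 * give 0x0005 with the carry discarded, not leak into the upper lane.
 * With t0 = 0x00018002 and t1 = 0x00018003: tmp = (t0 ^ t1) & 0x8000 = 0,
 * the masked add yields 0x00010002 + 0x00010003 = 0x00020005, and the
 * final XOR restores bit 15 (a no-op here), giving upper lane 0x0002 and
 * lower lane 0x0005: each 16-bit lane added independently.
 */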

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1.  Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
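
/* Editorial note on the fallback path above: the 33-bit result of
 * t0 + t1 + CF sits in q0; tcg_gen_extr_i64_i32() then splits it so the
 * low 32 bits become NF (and hence dest) while the high 32 bits, which
 * can only be 0 or 1, become the carry-out in CF directly.
 */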

/* dest = T0 - T1.  Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
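
/* Editorial note: after the subtract, signed overflow is computed as
 *   VF = (NF ^ t0) & (t0 ^ t1)
 * of which only bit 31 is meaningful: the operands differ in sign and
 * the result's sign differs from t0. For example 0x80000000 - 1 gives
 * NF = 0x7fffffff, so bit 31 of VF ends up set, flagging the
 * INT_MIN - 1 overflow.
 */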

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
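
/* Editorial note: A32 register-controlled LSL/LSR use only the bottom
 * byte of the shift register, and counts of 32..255 must produce zero.
 * TCG shifts are undefined for counts >= 32, so the movcond above swaps
 * in a zero source whenever (t1 & 0xff) > 31 and then masks the count
 * to five bits: e.g. a shift by 40 computes 0 << (40 & 0x1f) = 0.
 */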

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
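
/* Usage example (editorial): arm_gen_test_cc(0, label) builds the EQ
 * compare { TCG_COND_EQ, cpu_ZF } and emits brcondi(EQ, ZF, 0, label);
 * since cpu_ZF holds zero exactly when the guest Z flag is set, the
 * branch is taken precisely when the "eq" condition holds.
 */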

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
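
/* Editorial note: in system-mode BE32 (SCTLR.B set) memory is modelled
 * as little-endian words with sub-word accesses address-swizzled, hence
 * the XOR above: a byte access at address 1 becomes one at 1 ^ 3 = 2,
 * and a halfword access at 0 becomes one at 0 ^ 2 = 2. User-mode BE32
 * instead uses MO_BE on the access itself.
 */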

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                            \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                       \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                       \
    gen_aa32_ld##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo);                                  \
}

#define DO_GEN_ST(SUFF, OPC)                                            \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                     TCGv_i32 a32, int index)           \
{                                                                       \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
}                                                                       \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,             \
                                           TCGv_i32 val,                \
                                           TCGv_i32 a32, int index,     \
                                           ISSInfo issinfo)             \
{                                                                       \
    gen_aa32_st##SUFF(s, val, a32, index);                              \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                     \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
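
/* Editorial summary of the expansions above: each invocation defines a
 * pair such as gen_aa32_ld16u() and gen_aa32_ld16u_iss(). Callers pass
 * get_mem_index(s) as the index argument, and the _iss variants
 * additionally record syndrome information through disas_set_da_iss().
 */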

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
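
/* Editorial summary: the expansions define gen_vfp_add(dp), gen_vfp_sub(dp),
 * gen_vfp_mul(dp) and gen_vfp_div(dp), each operating on the implicit
 * F0/F1 operands, selecting the single- or double-precision helper via
 * the dp flag and leaving the result in F0.
 */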

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                        \
static inline void gen_vfp_##name(int dp, int neon)               \
{                                                                 \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                  \
    if (dp) {                                                     \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);    \
    } else {                                                      \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);    \
    }                                                             \
    tcg_temp_free_ptr(statusptr);                                 \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                        \
static inline void gen_vfp_##name(int dp, int shift, int neon)          \
{                                                                       \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                          \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                        \
    if (dp) {                                                           \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,    \
                                        statusptr);                     \
    } else {                                                            \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,    \
                                        statusptr);                     \
    }                                                                   \
    tcg_temp_free_i32(tmp_shift);                                       \
    tcg_temp_free_ptr(statusptr);                                       \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
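
/* Editorial example of the mapping above: with the register file stored
 * in vfp.zregs[], D regs pack two per zreg, so D5 is zregs[2].d[1];
 * S regs pack four per zreg, so S5 is the more-significant word
 * (l.upper) of zregs[1].d[0].
 */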

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset(int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
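
/* Editorial example: on a big-endian host the 8-byte units are stored
 * most-significant-first, so byte element 0 of a D register sits at
 * offset 0 ^ (8 - 1) = 7 within its unit, and 16-bit element 1 at
 * 2 ^ (8 - 2) = 4, reproducing the layout the little-endian case gets
 * without any adjustment.
 */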

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

a7812ae4 1702static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1703{
1704 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1705}
1706
a7812ae4 1707static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1708{
1709 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1710}
1711
1a66ac61
RH
1712static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1713{
1714 TCGv_ptr ret = tcg_temp_new_ptr();
1715 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1716 return ret;
1717}
1718
4373f3ce
PB
1719#define tcg_gen_ld_f32 tcg_gen_ld_i32
1720#define tcg_gen_ld_f64 tcg_gen_ld_i64
1721#define tcg_gen_st_f32 tcg_gen_st_i32
1722#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

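/* Bit 20 of a coprocessor instruction is its L (direction) bit: set for
 * reads from the coprocessor (MRC, LDC and friends), clear for writes.
 */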
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
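
/* As an illustration, IWMMXT_OP_ENV(avgb0) expands to:
 *
 *   static inline void gen_op_iwmmxt_avgb0_M0_wRn(int rn)
 *   {
 *       iwmmxt_load_reg(cpu_V1, rn);
 *       gen_helper_iwmmxt_avgb0(cpu_M0, cpu_env, cpu_M0, cpu_V1);
 *   }
 *
 * Each generated op combines M0 with wRn and leaves the result in M0;
 * the _ENV variants additionally pass cpu_env, for helpers that need
 * access to CPU state such as the SIMD status flags.
 */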

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
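
/* wCon records whether the wR (MUP, bit 1) and control (CUP, bit 0)
 * register files have been updated; the two helpers below set those
 * bits after an instruction writes the corresponding state.
 */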
e677137d 1880
e677137d
PB
1881static void gen_op_iwmmxt_set_mup(void)
1882{
39d5492a 1883 TCGv_i32 tmp;
e677137d
PB
1884 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1885 tcg_gen_ori_i32(tmp, tmp, 2);
1886 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1887}
1888
1889static void gen_op_iwmmxt_set_cup(void)
1890{
39d5492a 1891 TCGv_i32 tmp;
e677137d
PB
1892 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1893 tcg_gen_ori_i32(tmp, tmp, 1);
1894 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1895}
1896
1897static void gen_op_iwmmxt_setpsr_nz(void)
1898{
39d5492a 1899 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1900 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1901 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1902}
1903
1904static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1905{
1906 iwmmxt_load_reg(cpu_V1, rn);
86831435 1907 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1908 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1909}
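/* Decode the addressing mode of an iwMMXt load/store: bit 24 selects
 * pre- vs post-indexed, bit 23 the sign of the offset and bit 21 base
 * writeback.  Returns nonzero if the addressing form is invalid.
 */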
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
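/* Fetch the shift amount for an iwMMXt shift op, either from one of the
 * wCGR control registers (bit 8 set) or from the low half of wRd, and
 * mask it down to the range the caller allows.  Returns nonzero for an
 * invalid encoding.
 */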
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {             /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {              /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                        /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {         /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                        /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {              /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                        /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {         /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                        /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                     /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                     /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                     /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                     /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                     /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                     /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                         /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
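    /* TANDC and TORC below fold the per-field flags in wCASF down to a
     * single value by repeatedly AND-ing (or OR-ing) in a copy of the
     * register shifted up by one field width; afterwards the top four
     * bits hold the combined flags, which gen_set_nzcv() copies into
     * the CPSR.
     */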
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                   /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                   /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:     /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction). */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                   /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                   /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                   /* MIABB */
        case 0xd:                                   /* MIABT */
        case 0xe:                                   /* MIATB */
        case 0xf:                                   /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                 /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
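            /* The accumulators are only 40 bits wide, so only
             * bits [39:32] are meaningful in the high word.
             */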
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                    /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
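
/* For example, VFP_SREG_D(insn) builds a single precision register
 * number as (insn[15:12] << 1) | insn[22], while VFP_DREG_D() uses the
 * extra bit as the high end: insn[15:12] | (insn[22] << 4).  The latter
 * is only valid with VFP3's 32 double registers; without VFP3 a set
 * high bit makes the macro return 1 (UNDEF).
 */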

/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
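
/* Both helpers above broadcast one 16-bit lane across the 32-bit word:
 * gen_neon_dup_low16() turns 0x0000abcd into 0xabcdabcd, and
 * gen_neon_dup_high16() turns 0xabcd0000 into the same value.
 */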
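/* Implement VSEL (the ARMv8 conditional select) without branches: the
 * condition is evaluated from the cached ZF/NF/VF flag values using
 * movcond, picking either the Vn or Vm operand.  The "gt" case chains
 * two movconds to express !Z && (N == V).
 */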
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
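/* Handle VRINT{A,N,P,M}: round to integer with the rounding mode fixed
 * by the instruction rather than by FPSCR.  gen_helper_set_rmode()
 * hands back the previous rounding mode, so calling it a second time
 * afterwards with that value restores the guest's FPSCR setting.
 */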
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}

static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}

/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3393
a1c7273b 3394/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3395 (ie. an undefined instruction). */
7dcc1f89 3396static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3397{
3398 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3399 int dp, veclen;
39d5492a
PM
3400 TCGv_i32 addr;
3401 TCGv_i32 tmp;
3402 TCGv_i32 tmp2;
b7bcbe95 3403
d614a513 3404 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3405 return 1;
d614a513 3406 }
40f137e1 3407
2c7ffc41
PM
3408 /* FIXME: this access check should not take precedence over UNDEF
3409 * for invalid encodings; we will generate incorrect syndrome information
3410 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3411 */
9dbbc748 3412 if (s->fp_excp_el) {
2c7ffc41 3413 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3414 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3415 return 0;
3416 }
3417
5df8bac1 3418 if (!s->vfp_enabled) {
9ee6e8bb 3419 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3420 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3421 return 1;
3422 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3423 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3424 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3425 return 1;
a50c0f51 3426 }
40f137e1 3427 }
6a57f3eb
WN
3428
3429 if (extract32(insn, 28, 4) == 0xf) {
3430 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3431 * only used in v8 and above.
3432 */
7dcc1f89 3433 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3434 }
3435
b7bcbe95
FB
3436 dp = ((insn & 0xf00) == 0xb00);
3437 switch ((insn >> 24) & 0xf) {
3438 case 0xe:
3439 if (insn & (1 << 4)) {
3440 /* single register transfer */
b7bcbe95
FB
3441 rd = (insn >> 12) & 0xf;
3442 if (dp) {
9ee6e8bb
PB
3443 int size;
3444 int pass;
3445
3446 VFP_DREG_N(rn, insn);
3447 if (insn & 0xf)
b7bcbe95 3448 return 1;
9ee6e8bb 3449 if (insn & 0x00c00060
d614a513 3450 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3451 return 1;
d614a513 3452 }
9ee6e8bb
PB
3453
3454 pass = (insn >> 21) & 1;
3455 if (insn & (1 << 22)) {
3456 size = 0;
3457 offset = ((insn >> 5) & 3) * 8;
3458 } else if (insn & (1 << 5)) {
3459 size = 1;
3460 offset = (insn & (1 << 6)) ? 16 : 0;
3461 } else {
3462 size = 2;
3463 offset = 0;
3464 }
18c9b560 3465 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3466 /* vfp->arm */
ad69471c 3467 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3468 switch (size) {
3469 case 0:
9ee6e8bb 3470 if (offset)
ad69471c 3471 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3472 if (insn & (1 << 23))
ad69471c 3473 gen_uxtb(tmp);
9ee6e8bb 3474 else
ad69471c 3475 gen_sxtb(tmp);
9ee6e8bb
PB
3476 break;
3477 case 1:
9ee6e8bb
PB
3478 if (insn & (1 << 23)) {
3479 if (offset) {
ad69471c 3480 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3481 } else {
ad69471c 3482 gen_uxth(tmp);
9ee6e8bb
PB
3483 }
3484 } else {
3485 if (offset) {
ad69471c 3486 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3487 } else {
ad69471c 3488 gen_sxth(tmp);
9ee6e8bb
PB
3489 }
3490 }
3491 break;
3492 case 2:
9ee6e8bb
PB
3493 break;
3494 }
ad69471c 3495 store_reg(s, rd, tmp);
b7bcbe95
FB
3496 } else {
3497 /* arm->vfp */
ad69471c 3498 tmp = load_reg(s, rd);
9ee6e8bb
PB
3499 if (insn & (1 << 23)) {
3500 /* VDUP */
32f91fb7
RH
3501 int vec_size = pass ? 16 : 8;
3502 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
3503 vec_size, vec_size, tmp);
3504 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
3505 } else {
3506 /* VMOV */
3507 switch (size) {
3508 case 0:
ad69471c 3509 tmp2 = neon_load_reg(rn, pass);
d593c48e 3510 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3511 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3512 break;
3513 case 1:
ad69471c 3514 tmp2 = neon_load_reg(rn, pass);
d593c48e 3515 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3516 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3517 break;
3518 case 2:
9ee6e8bb
PB
3519 break;
3520 }
ad69471c 3521 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3522 }
b7bcbe95 3523 }
9ee6e8bb
PB
3524 } else { /* !dp */
3525 if ((insn & 0x6f) != 0x00)
3526 return 1;
3527 rn = VFP_SREG_N(insn);
18c9b560 3528 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3529 /* vfp->arm */
3530 if (insn & (1 << 21)) {
3531 /* system register */
40f137e1 3532 rn >>= 1;
9ee6e8bb 3533
b7bcbe95 3534 switch (rn) {
40f137e1 3535 case ARM_VFP_FPSID:
4373f3ce 3536 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
3537 VFP3 restricts all id registers to privileged
3538 accesses. */
3539 if (IS_USER(s)
d614a513 3540 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3541 return 1;
d614a513 3542 }
4373f3ce 3543 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3544 break;
40f137e1 3545 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3546 if (IS_USER(s))
3547 return 1;
4373f3ce 3548 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3549 break;
40f137e1
PB
3550 case ARM_VFP_FPINST:
3551 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3552 /* Not present in VFP3. */
3553 if (IS_USER(s)
d614a513 3554 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3555 return 1;
d614a513 3556 }
4373f3ce 3557 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3558 break;
40f137e1 3559 case ARM_VFP_FPSCR:
601d70b9 3560 if (rd == 15) {
4373f3ce
PB
3561 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3562 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3563 } else {
7d1b0095 3564 tmp = tcg_temp_new_i32();
4373f3ce
PB
3565 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3566 }
b7bcbe95 3567 break;
a50c0f51 3568 case ARM_VFP_MVFR2:
d614a513 3569 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3570 return 1;
3571 }
3572 /* fall through */
9ee6e8bb
PB
3573 case ARM_VFP_MVFR0:
3574 case ARM_VFP_MVFR1:
3575 if (IS_USER(s)
d614a513 3576 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3577 return 1;
d614a513 3578 }
4373f3ce 3579 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3580 break;
b7bcbe95
FB
3581 default:
3582 return 1;
3583 }
3584 } else {
3585 gen_mov_F0_vreg(0, rn);
4373f3ce 3586 tmp = gen_vfp_mrs();
b7bcbe95
FB
3587 }
3588 if (rd == 15) {
b5ff1b31 3589 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3590 gen_set_nzcv(tmp);
7d1b0095 3591 tcg_temp_free_i32(tmp);
4373f3ce
PB
3592 } else {
3593 store_reg(s, rd, tmp);
3594 }
b7bcbe95
FB
3595 } else {
3596 /* arm->vfp */
b7bcbe95 3597 if (insn & (1 << 21)) {
40f137e1 3598 rn >>= 1;
b7bcbe95
FB
3599 /* system register */
3600 switch (rn) {
40f137e1 3601 case ARM_VFP_FPSID:
9ee6e8bb
PB
3602 case ARM_VFP_MVFR0:
3603 case ARM_VFP_MVFR1:
b7bcbe95
FB
3604 /* Writes are ignored. */
3605 break;
40f137e1 3606 case ARM_VFP_FPSCR:
e4c1cfa5 3607 tmp = load_reg(s, rd);
4373f3ce 3608 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3609 tcg_temp_free_i32(tmp);
b5ff1b31 3610 gen_lookup_tb(s);
b7bcbe95 3611 break;
40f137e1 3612 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3613 if (IS_USER(s))
3614 return 1;
71b3c3de
JR
3615 /* TODO: VFP subarchitecture support.
3616 * For now, we keep only the EN bit. */
e4c1cfa5 3617 tmp = load_reg(s, rd);
71b3c3de 3618 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3619 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3620 gen_lookup_tb(s);
3621 break;
3622 case ARM_VFP_FPINST:
3623 case ARM_VFP_FPINST2:
23adb861
PM
3624 if (IS_USER(s)) {
3625 return 1;
3626 }
e4c1cfa5 3627 tmp = load_reg(s, rd);
4373f3ce 3628 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3629 break;
b7bcbe95
FB
3630 default:
3631 return 1;
3632 }
3633 } else {
e4c1cfa5 3634 tmp = load_reg(s, rd);
4373f3ce 3635 gen_vfp_msr(tmp);
b7bcbe95
FB
3636 gen_mov_vreg_F0(0, rn);
3637 }
3638 }
3639 }
3640 } else {
3641 /* data processing */
e80941bd
RH
3642 bool rd_is_dp = dp;
3643 bool rm_is_dp = dp;
3644 bool no_output = false;
3645
b7bcbe95
FB
3646 /* The opcode is in bits 23, 21, 20 and 6. */
3647 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
e80941bd 3648 rn = VFP_SREG_N(insn);
b7bcbe95 3649
e80941bd
RH
3650 if (op == 15) {
3651 /* rn is opcode, encoded as per VFP_SREG_N. */
3652 switch (rn) {
3653 case 0x00: /* vmov */
3654 case 0x01: /* vabs */
3655 case 0x02: /* vneg */
3656 case 0x03: /* vsqrt */
3657 break;
3658
3659 case 0x04: /* vcvtb.f64.f16, vcvtb.f32.f16 */
3660 case 0x05: /* vcvtt.f64.f16, vcvtt.f32.f16 */
3661 /*
3662 * VCVTB, VCVTT: only present with the halfprec extension
3663 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3664 * (we choose to UNDEF)
04595bf6 3665 */
e80941bd
RH
3666 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3667 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3668 return 1;
3669 }
3670 rm_is_dp = false;
3671 break;
3672 case 0x06: /* vcvtb.f16.f32, vcvtb.f16.f64 */
3673 case 0x07: /* vcvtt.f16.f32, vcvtt.f16.f64 */
3674 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3675 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
3676 return 1;
3677 }
3678 rd_is_dp = false;
3679 break;
3680
3681 case 0x08: case 0x0a: /* vcmp, vcmpz */
3682 case 0x09: case 0x0b: /* vcmpe, vcmpez */
3683 no_output = true;
3684 break;
3685
3686 case 0x0c: /* vrintr */
3687 case 0x0d: /* vrintz */
3688 case 0x0e: /* vrintx */
3689 break;
3690
3691 case 0x0f: /* vcvt double<->single */
3692 rd_is_dp = !dp;
3693 break;
3694
3695 case 0x10: /* vcvt.fxx.u32 */
3696 case 0x11: /* vcvt.fxx.s32 */
3697 rm_is_dp = false;
3698 break;
3699 case 0x18: /* vcvtr.u32.fxx */
3700 case 0x19: /* vcvtz.u32.fxx */
3701 case 0x1a: /* vcvtr.s32.fxx */
3702 case 0x1b: /* vcvtz.s32.fxx */
3703 rd_is_dp = false;
3704 break;
3705
3706 case 0x14: /* vcvt fp <-> fixed */
3707 case 0x15:
3708 case 0x16:
3709 case 0x17:
3710 case 0x1c:
3711 case 0x1d:
3712 case 0x1e:
3713 case 0x1f:
3714 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3715 return 1;
3716 }
3717 /* Immediate frac_bits has same format as SREG_M. */
3718 rm_is_dp = false;
3719 break;
3720
6c1f6f27
RH
3721 case 0x13: /* vjcvt */
3722 if (!dp || !dc_isar_feature(aa32_jscvt, s)) {
3723 return 1;
3724 }
3725 rd_is_dp = false;
3726 break;
3727
e80941bd
RH
3728 default:
3729 return 1;
b7bcbe95 3730 }
e80941bd
RH
3731 } else if (dp) {
3732 /* rn is register number */
3733 VFP_DREG_N(rn, insn);
3734 }
3735
3736 if (rd_is_dp) {
3737 VFP_DREG_D(rd, insn);
3738 } else {
3739 rd = VFP_SREG_D(insn);
3740 }
3741 if (rm_is_dp) {
3742 VFP_DREG_M(rm, insn);
b7bcbe95 3743 } else {
9ee6e8bb 3744 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3745 }
3746
69d1fc22 3747 veclen = s->vec_len;
e80941bd 3748 if (op == 15 && rn > 3) {
b7bcbe95 3749 veclen = 0;
e80941bd 3750 }
b7bcbe95
FB
3751
3752 /* Shut up compiler warnings. */
3753 delta_m = 0;
3754 delta_d = 0;
3755 bank_mask = 0;
3b46e624 3756
b7bcbe95
FB
3757 if (veclen > 0) {
3758 if (dp)
3759 bank_mask = 0xc;
3760 else
3761 bank_mask = 0x18;
3762
3763 /* Figure out what type of vector operation this is. */
3764 if ((rd & bank_mask) == 0) {
3765 /* scalar */
3766 veclen = 0;
3767 } else {
3768 if (dp)
69d1fc22 3769 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3770 else
69d1fc22 3771 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3772
3773 if ((rm & bank_mask) == 0) {
3774 /* mixed scalar/vector */
3775 delta_m = 0;
3776 } else {
3777 /* vector */
3778 delta_m = delta_d;
3779 }
3780 }
3781 }
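/* Note the scalar case above: a destination in the first bank
 * (D0-D3 for dp, S0-S7 for single precision) always executes as a
 * scalar op, whatever FPSCR.LEN says. E.g. with vec_stride = 0 a dp
 * vector op on D4 steps with delta_d = 1, while the same op on D0
 * runs exactly once.
 */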
3782
3783 /* Load the initial operands. */
3784 if (op == 15) {
3785 switch (rn) {
e80941bd 3786 case 0x08: case 0x09: /* Compare */
b7bcbe95
FB
3787 gen_mov_F0_vreg(dp, rd);
3788 gen_mov_F1_vreg(dp, rm);
3789 break;
e80941bd 3790 case 0x0a: case 0x0b: /* Compare with zero */
b7bcbe95
FB
3791 gen_mov_F0_vreg(dp, rd);
3792 gen_vfp_F1_ld0(dp);
3793 break;
e80941bd
RH
3794 case 0x14: /* vcvt fp <-> fixed */
3795 case 0x15:
3796 case 0x16:
3797 case 0x17:
3798 case 0x1c:
3799 case 0x1d:
3800 case 0x1e:
3801 case 0x1f:
9ee6e8bb
PB
3802 /* Source and destination the same. */
3803 gen_mov_F0_vreg(dp, rd);
3804 break;
b7bcbe95
FB
3805 default:
3806 /* One source operand. */
e80941bd 3807 gen_mov_F0_vreg(rm_is_dp, rm);
9ee6e8bb 3808 break;
b7bcbe95
FB
3809 }
3810 } else {
3811 /* Two source operands. */
3812 gen_mov_F0_vreg(dp, rn);
3813 gen_mov_F1_vreg(dp, rm);
3814 }
3815
3816 for (;;) {
3817 /* Perform the calculation. */
3818 switch (op) {
605a6aed
PM
3819 case 0: /* VMLA: fd + (fn * fm) */
3820 /* Note that the order of inputs to the add matters for NaNs */
3821 gen_vfp_F1_mul(dp);
3822 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3823 gen_vfp_add(dp);
3824 break;
605a6aed 3825 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3826 gen_vfp_mul(dp);
605a6aed
PM
3827 gen_vfp_F1_neg(dp);
3828 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3829 gen_vfp_add(dp);
3830 break;
605a6aed
PM
3831 case 2: /* VNMLS: -fd + (fn * fm) */
3832 /* Note that it isn't valid to replace (-A + B) with (B - A)
3833 * or similar plausible-looking simplifications
3834 * because this will give wrong results for NaNs.
3835 */
3836 gen_vfp_F1_mul(dp);
3837 gen_mov_F0_vreg(dp, rd);
3838 gen_vfp_neg(dp);
3839 gen_vfp_add(dp);
b7bcbe95 3840 break;
605a6aed 3841 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3842 gen_vfp_mul(dp);
605a6aed
PM
3843 gen_vfp_F1_neg(dp);
3844 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3845 gen_vfp_neg(dp);
605a6aed 3846 gen_vfp_add(dp);
b7bcbe95
FB
3847 break;
3848 case 4: /* mul: fn * fm */
3849 gen_vfp_mul(dp);
3850 break;
3851 case 5: /* nmul: -(fn * fm) */
3852 gen_vfp_mul(dp);
3853 gen_vfp_neg(dp);
3854 break;
3855 case 6: /* add: fn + fm */
3856 gen_vfp_add(dp);
3857 break;
3858 case 7: /* sub: fn - fm */
3859 gen_vfp_sub(dp);
3860 break;
3861 case 8: /* div: fn / fm */
3862 gen_vfp_div(dp);
3863 break;
da97f52c
PM
3864 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3865 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3866 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3867 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3868 /* These are fused multiply-add, and must be done as one
3869 * floating point operation with no rounding between the
3870 * multiplication and addition steps.
3871 * NB that doing the negations here as separate steps is
3872 * correct: an input NaN should come out with its sign bit
3873 * flipped if it is a negated input.
3874 */
d614a513 3875 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3876 return 1;
3877 }
3878 if (dp) {
3879 TCGv_ptr fpst;
3880 TCGv_i64 frd;
3881 if (op & 1) {
3882 /* VFNMS, VFMS */
3883 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3884 }
3885 frd = tcg_temp_new_i64();
3886 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3887 if (op & 2) {
3888 /* VFNMA, VFNMS */
3889 gen_helper_vfp_negd(frd, frd);
3890 }
3891 fpst = get_fpstatus_ptr(0);
3892 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3893 cpu_F1d, frd, fpst);
3894 tcg_temp_free_ptr(fpst);
3895 tcg_temp_free_i64(frd);
3896 } else {
3897 TCGv_ptr fpst;
3898 TCGv_i32 frd;
3899 if (op & 1) {
3900 /* VFNMS, VFMS */
3901 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3902 }
3903 frd = tcg_temp_new_i32();
3904 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3905 if (op & 2) {
3906 gen_helper_vfp_negs(frd, frd);
3907 }
3908 fpst = get_fpstatus_ptr(0);
3909 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3910 cpu_F1s, frd, fpst);
3911 tcg_temp_free_ptr(fpst);
3912 tcg_temp_free_i32(frd);
3913 }
3914 break;
9ee6e8bb 3915 case 14: /* fconst */
d614a513
PM
3916 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3917 return 1;
3918 }
9ee6e8bb
PB
3919
3920 n = (insn << 12) & 0x80000000;
3921 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3922 if (dp) {
3923 if (i & 0x40)
3924 i |= 0x3f80;
3925 else
3926 i |= 0x4000;
3927 n |= i << 16;
4373f3ce 3928 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3929 } else {
3930 if (i & 0x40)
3931 i |= 0x780;
3932 else
3933 i |= 0x800;
3934 n |= i << 19;
5b340b51 3935 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3936 }
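/* Worked example of the expansion above, single-precision path:
 * imm8 = 0x70 (insn bits [18:16] = 0b111, bit 19 and bits [3:0] clear)
 * gives n = 0 and i = 0x70; bit 6 of i is set, so i |= 0x780 yields
 * 0x7f0 and n = 0x7f0 << 19 = 0x3f800000, which is 1.0f.
 */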
9ee6e8bb 3937 break;
b7bcbe95
FB
3938 case 15: /* extension space */
3939 switch (rn) {
3940 case 0: /* cpy */
3941 /* no-op */
3942 break;
3943 case 1: /* abs */
3944 gen_vfp_abs(dp);
3945 break;
3946 case 2: /* neg */
3947 gen_vfp_neg(dp);
3948 break;
3949 case 3: /* sqrt */
3950 gen_vfp_sqrt(dp);
3951 break;
239c20c7 3952 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
486624fc
AB
3953 {
3954 TCGv_ptr fpst = get_fpstatus_ptr(false);
3955 TCGv_i32 ahp_mode = get_ahp_flag();
60011498
PB
3956 tmp = gen_vfp_mrs();
3957 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3958 if (dp) {
3959 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3960 fpst, ahp_mode);
239c20c7
WN
3961 } else {
3962 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3963 fpst, ahp_mode);
239c20c7 3964 }
486624fc
AB
3965 tcg_temp_free_i32(ahp_mode);
3966 tcg_temp_free_ptr(fpst);
7d1b0095 3967 tcg_temp_free_i32(tmp);
60011498 3968 break;
486624fc 3969 }
239c20c7 3970 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
486624fc
AB
3971 {
3972 TCGv_ptr fpst = get_fpstatus_ptr(false);
3973 TCGv_i32 ahp = get_ahp_flag();
60011498
PB
3974 tmp = gen_vfp_mrs();
3975 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3976 if (dp) {
3977 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
486624fc 3978 fpst, ahp);
239c20c7
WN
3979 } else {
3980 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
486624fc 3981 fpst, ahp);
239c20c7 3982 }
7d1b0095 3983 tcg_temp_free_i32(tmp);
486624fc
AB
3984 tcg_temp_free_i32(ahp);
3985 tcg_temp_free_ptr(fpst);
60011498 3986 break;
486624fc 3987 }
239c20c7 3988 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
486624fc
AB
3989 {
3990 TCGv_ptr fpst = get_fpstatus_ptr(false);
3991 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 3992 tmp = tcg_temp_new_i32();
486624fc 3993
239c20c7
WN
3994 if (dp) {
3995 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 3996 fpst, ahp);
239c20c7
WN
3997 } else {
3998 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 3999 fpst, ahp);
239c20c7 4000 }
486624fc
AB
4001 tcg_temp_free_i32(ahp);
4002 tcg_temp_free_ptr(fpst);
60011498
PB
4003 gen_mov_F0_vreg(0, rd);
4004 tmp2 = gen_vfp_mrs();
4005 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
4006 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4007 tcg_temp_free_i32(tmp2);
60011498
PB
4008 gen_vfp_msr(tmp);
4009 break;
486624fc 4010 }
239c20c7 4011 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
486624fc
AB
4012 {
4013 TCGv_ptr fpst = get_fpstatus_ptr(false);
4014 TCGv_i32 ahp = get_ahp_flag();
7d1b0095 4015 tmp = tcg_temp_new_i32();
239c20c7
WN
4016 if (dp) {
4017 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
486624fc 4018 fpst, ahp);
239c20c7
WN
4019 } else {
4020 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
486624fc 4021 fpst, ahp);
239c20c7 4022 }
486624fc
AB
4023 tcg_temp_free_i32(ahp);
4024 tcg_temp_free_ptr(fpst);
60011498
PB
4025 tcg_gen_shli_i32(tmp, tmp, 16);
4026 gen_mov_F0_vreg(0, rd);
4027 tmp2 = gen_vfp_mrs();
4028 tcg_gen_ext16u_i32(tmp2, tmp2);
4029 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4030 tcg_temp_free_i32(tmp2);
60011498
PB
4031 gen_vfp_msr(tmp);
4032 break;
486624fc 4033 }
b7bcbe95
FB
4034 case 8: /* cmp */
4035 gen_vfp_cmp(dp);
4036 break;
4037 case 9: /* cmpe */
4038 gen_vfp_cmpe(dp);
4039 break;
4040 case 10: /* cmpz */
4041 gen_vfp_cmp(dp);
4042 break;
4043 case 11: /* cmpez */
4044 gen_vfp_F1_ld0(dp);
4045 gen_vfp_cmpe(dp);
4046 break;
664c6733
WN
4047 case 12: /* vrintr */
4048 {
4049 TCGv_ptr fpst = get_fpstatus_ptr(0);
4050 if (dp) {
4051 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4052 } else {
4053 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4054 }
4055 tcg_temp_free_ptr(fpst);
4056 break;
4057 }
a290c62a
WN
4058 case 13: /* vrintz */
4059 {
4060 TCGv_ptr fpst = get_fpstatus_ptr(0);
4061 TCGv_i32 tcg_rmode;
4062 tcg_rmode = tcg_const_i32(float_round_to_zero);
9b049916 4063 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4064 if (dp) {
4065 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
4066 } else {
4067 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
4068 }
9b049916 4069 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
a290c62a
WN
4070 tcg_temp_free_i32(tcg_rmode);
4071 tcg_temp_free_ptr(fpst);
4072 break;
4073 }
4e82bc01
WN
4074 case 14: /* vrintx */
4075 {
4076 TCGv_ptr fpst = get_fpstatus_ptr(0);
4077 if (dp) {
4078 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
4079 } else {
4080 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
4081 }
4082 tcg_temp_free_ptr(fpst);
4083 break;
4084 }
b7bcbe95 4085 case 15: /* single<->double conversion */
e80941bd 4086 if (dp) {
4373f3ce 4087 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
e80941bd 4088 } else {
4373f3ce 4089 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
e80941bd 4090 }
b7bcbe95
FB
4091 break;
4092 case 16: /* fuito */
5500b06c 4093 gen_vfp_uito(dp, 0);
b7bcbe95
FB
4094 break;
4095 case 17: /* fsito */
5500b06c 4096 gen_vfp_sito(dp, 0);
b7bcbe95 4097 break;
6c1f6f27
RH
4098 case 19: /* vjcvt */
4099 gen_helper_vjcvt(cpu_F0s, cpu_F0d, cpu_env);
4100 break;
9ee6e8bb 4101 case 20: /* fshto */
5500b06c 4102 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
4103 break;
4104 case 21: /* fslto */
5500b06c 4105 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
4106 break;
4107 case 22: /* fuhto */
5500b06c 4108 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
4109 break;
4110 case 23: /* fulto */
5500b06c 4111 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 4112 break;
b7bcbe95 4113 case 24: /* ftoui */
5500b06c 4114 gen_vfp_toui(dp, 0);
b7bcbe95
FB
4115 break;
4116 case 25: /* ftouiz */
5500b06c 4117 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
4118 break;
4119 case 26: /* ftosi */
5500b06c 4120 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
4121 break;
4122 case 27: /* ftosiz */
5500b06c 4123 gen_vfp_tosiz(dp, 0);
b7bcbe95 4124 break;
9ee6e8bb 4125 case 28: /* ftosh */
5500b06c 4126 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
4127 break;
4128 case 29: /* ftosl */
5500b06c 4129 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
4130 break;
4131 case 30: /* ftouh */
5500b06c 4132 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
4133 break;
4134 case 31: /* ftoul */
5500b06c 4135 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 4136 break;
b7bcbe95 4137 default: /* undefined */
e80941bd 4138 g_assert_not_reached();
b7bcbe95
FB
4139 }
4140 break;
4141 default: /* undefined */
b7bcbe95
FB
4142 return 1;
4143 }
4144
e80941bd
RH
4145 /* Write back the result, if any. */
4146 if (!no_output) {
4147 gen_mov_vreg_F0(rd_is_dp, rd);
239c20c7 4148 }
b7bcbe95
FB
4149
4150 /* break out of the loop if we have finished */
e80941bd 4151 if (veclen == 0) {
b7bcbe95 4152 break;
e80941bd 4153 }
b7bcbe95
FB
4154
4155 if (op == 15 && delta_m == 0) {
4156 /* single source one-many */
4157 while (veclen--) {
4158 rd = ((rd + delta_d) & (bank_mask - 1))
4159 | (rd & bank_mask);
4160 gen_mov_vreg_F0(dp, rd);
4161 }
4162 break;
4163 }
4164 /* Setup the next operands. */
4165 veclen--;
4166 rd = ((rd + delta_d) & (bank_mask - 1))
4167 | (rd & bank_mask);
4168
4169 if (op == 15) {
4170 /* One source operand. */
4171 rm = ((rm + delta_m) & (bank_mask - 1))
4172 | (rm & bank_mask);
4173 gen_mov_F0_vreg(dp, rm);
4174 } else {
4175 /* Two source operands. */
4176 rn = ((rn + delta_d) & (bank_mask - 1))
4177 | (rn & bank_mask);
4178 gen_mov_F0_vreg(dp, rn);
4179 if (delta_m) {
4180 rm = ((rm + delta_m) & (bank_mask - 1))
4181 | (rm & bank_mask);
4182 gen_mov_F1_vreg(dp, rm);
4183 }
4184 }
4185 }
4186 }
4187 break;
4188 case 0xc:
4189 case 0xd:
8387da81 4190 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4191 /* two-register transfer */
4192 rn = (insn >> 16) & 0xf;
4193 rd = (insn >> 12) & 0xf;
4194 if (dp) {
9ee6e8bb
PB
4195 VFP_DREG_M(rm, insn);
4196 } else {
4197 rm = VFP_SREG_M(insn);
4198 }
b7bcbe95 4199
18c9b560 4200 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4201 /* vfp->arm */
4202 if (dp) {
4373f3ce
PB
4203 gen_mov_F0_vreg(0, rm * 2);
4204 tmp = gen_vfp_mrs();
4205 store_reg(s, rd, tmp);
4206 gen_mov_F0_vreg(0, rm * 2 + 1);
4207 tmp = gen_vfp_mrs();
4208 store_reg(s, rn, tmp);
b7bcbe95
FB
4209 } else {
4210 gen_mov_F0_vreg(0, rm);
4373f3ce 4211 tmp = gen_vfp_mrs();
8387da81 4212 store_reg(s, rd, tmp);
b7bcbe95 4213 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4214 tmp = gen_vfp_mrs();
8387da81 4215 store_reg(s, rn, tmp);
b7bcbe95
FB
4216 }
4217 } else {
4218 /* arm->vfp */
4219 if (dp) {
4373f3ce
PB
4220 tmp = load_reg(s, rd);
4221 gen_vfp_msr(tmp);
4222 gen_mov_vreg_F0(0, rm * 2);
4223 tmp = load_reg(s, rn);
4224 gen_vfp_msr(tmp);
4225 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4226 } else {
8387da81 4227 tmp = load_reg(s, rd);
4373f3ce 4228 gen_vfp_msr(tmp);
b7bcbe95 4229 gen_mov_vreg_F0(0, rm);
8387da81 4230 tmp = load_reg(s, rn);
4373f3ce 4231 gen_vfp_msr(tmp);
b7bcbe95
FB
4232 gen_mov_vreg_F0(0, rm + 1);
4233 }
4234 }
4235 } else {
4236 /* Load/store */
4237 rn = (insn >> 16) & 0xf;
4238 if (dp)
9ee6e8bb 4239 VFP_DREG_D(rd, insn);
b7bcbe95 4240 else
9ee6e8bb 4241 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4242 if ((insn & 0x01200000) == 0x01000000) {
4243 /* Single load/store */
4244 offset = (insn & 0xff) << 2;
4245 if ((insn & (1 << 23)) == 0)
4246 offset = -offset;
934814f1
PM
4247 if (s->thumb && rn == 15) {
4248 /* This is actually UNPREDICTABLE */
4249 addr = tcg_temp_new_i32();
4250 tcg_gen_movi_i32(addr, s->pc & ~2);
4251 } else {
4252 addr = load_reg(s, rn);
4253 }
312eea9f 4254 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4255 if (insn & (1 << 20)) {
312eea9f 4256 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4257 gen_mov_vreg_F0(dp, rd);
4258 } else {
4259 gen_mov_F0_vreg(dp, rd);
312eea9f 4260 gen_vfp_st(s, dp, addr);
b7bcbe95 4261 }
7d1b0095 4262 tcg_temp_free_i32(addr);
b7bcbe95
FB
4263 } else {
4264 /* load/store multiple */
934814f1 4265 int w = insn & (1 << 21);
b7bcbe95
FB
4266 if (dp)
4267 n = (insn >> 1) & 0x7f;
4268 else
4269 n = insn & 0xff;
4270
934814f1
PM
4271 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4272 /* P == U , W == 1 => UNDEF */
4273 return 1;
4274 }
4275 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4276 /* UNPREDICTABLE cases for bad immediates: we choose to
4277 * UNDEF to avoid generating huge numbers of TCG ops
4278 */
4279 return 1;
4280 }
4281 if (rn == 15 && w) {
4282 /* writeback to PC is UNPREDICTABLE; we choose to UNDEF */
4283 return 1;
4284 }
4285
4286 if (s->thumb && rn == 15) {
4287 /* This is actually UNPREDICTABLE */
4288 addr = tcg_temp_new_i32();
4289 tcg_gen_movi_i32(addr, s->pc & ~2);
4290 } else {
4291 addr = load_reg(s, rn);
4292 }
b7bcbe95 4293 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4294 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95 4295
8a954faf
PM
4296 if (s->v8m_stackcheck && rn == 13 && w) {
4297 /*
4298 * Here 'addr' is the lowest address we will store to,
4299 * and is either the old SP (if post-increment) or
4300 * the new SP (if pre-decrement). For post-increment
4301 * where the old value is below the limit and the new
4302 * value is above, it is UNKNOWN whether the limit check
4303 * triggers; we choose to trigger.
4304 */
4305 gen_helper_v8m_stackcheck(cpu_env, addr);
4306 }
4307
b7bcbe95
FB
4308 if (dp)
4309 offset = 8;
4310 else
4311 offset = 4;
4312 for (i = 0; i < n; i++) {
18c9b560 4313 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4314 /* load */
312eea9f 4315 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4316 gen_mov_vreg_F0(dp, rd + i);
4317 } else {
4318 /* store */
4319 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4320 gen_vfp_st(s, dp, addr);
b7bcbe95 4321 }
312eea9f 4322 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4323 }
934814f1 4324 if (w) {
b7bcbe95
FB
4325 /* writeback */
4326 if (insn & (1 << 24))
4327 offset = -offset * n;
4328 else if (dp && (insn & 1))
4329 offset = 4;
4330 else
4331 offset = 0;
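/* The odd-immediate case above (dp && (insn & 1)) is the FLDMX/FSTMX
 * form, which transfers 2n+1 words; the loop has already advanced
 * addr by 8 * n, so writeback accounts for the one extra word here.
 */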
4332
4333 if (offset != 0)
312eea9f
FN
4334 tcg_gen_addi_i32(addr, addr, offset);
4335 store_reg(s, rn, addr);
4336 } else {
7d1b0095 4337 tcg_temp_free_i32(addr);
b7bcbe95
FB
4338 }
4339 }
4340 }
4341 break;
4342 default:
4343 /* Should never happen. */
4344 return 1;
4345 }
4346 return 0;
4347}
4348
90aa39a1 4349static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4350{
90aa39a1 4351#ifndef CONFIG_USER_ONLY
dcba3a8d 4352 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4353 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4354#else
4355 return true;
4356#endif
4357}
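/* Direct chaining (tcg_gen_goto_tb/tcg_gen_exit_tb) is only used when
 * the destination lies in the same guest page as this TB or the
 * current insn, since TB links are invalidated per page; any other
 * target falls back to the lookup-and-goto-ptr path below.
 */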
6e256c93 4358
8a6b28c7
EC
4359static void gen_goto_ptr(void)
4360{
7f11636d 4361 tcg_gen_lookup_and_goto_ptr();
8a6b28c7
EC
4362}
4363
4cae8f56
AB
4364/* This will end the TB but doesn't guarantee we'll return to
4365 * cpu_loop_exec. Any live exit_requests will be processed as we
4366 * enter the next TB.
4367 */
8a6b28c7 4368static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4369{
4370 if (use_goto_tb(s, dest)) {
57fec1fe 4371 tcg_gen_goto_tb(n);
eaed129d 4372 gen_set_pc_im(s, dest);
07ea28b4 4373 tcg_gen_exit_tb(s->base.tb, n);
6e256c93 4374 } else {
eaed129d 4375 gen_set_pc_im(s, dest);
8a6b28c7 4376 gen_goto_ptr();
6e256c93 4377 }
dcba3a8d 4378 s->base.is_jmp = DISAS_NORETURN;
c53be334
FB
4379}
4380
8aaca4c0
FB
4381static inline void gen_jmp (DisasContext *s, uint32_t dest)
4382{
b636649f 4383 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4384 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4385 if (s->thumb)
d9ba4830
PB
4386 dest |= 1;
4387 gen_bx_im(s, dest);
8aaca4c0 4388 } else {
6e256c93 4389 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4390 }
4391}
4392
39d5492a 4393static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4394{
ee097184 4395 if (x)
d9ba4830 4396 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4397 else
d9ba4830 4398 gen_sxth(t0);
ee097184 4399 if (y)
d9ba4830 4400 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4401 else
d9ba4830
PB
4402 gen_sxth(t1);
4403 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4404}
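/* Example: with x = 0, y = 1, t0 = 0xffff8000 and t1 = 0x7fff0000 the
 * operands become -32768 and 32767, and the result is
 * -32768 * 32767 = 0xc0008000 (the SMULBT case).
 */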
4405
4406/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4407static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4408{
b5ff1b31
FB
4409 uint32_t mask;
4410
4411 mask = 0;
4412 if (flags & (1 << 0))
4413 mask |= 0xff;
4414 if (flags & (1 << 1))
4415 mask |= 0xff00;
4416 if (flags & (1 << 2))
4417 mask |= 0xff0000;
4418 if (flags & (1 << 3))
4419 mask |= 0xff000000;
9ee6e8bb 4420
2ae23e75 4421 /* Mask out undefined bits. */
9ee6e8bb 4422 mask &= ~CPSR_RESERVED;
d614a513 4423 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4424 mask &= ~CPSR_T;
d614a513
PM
4425 }
4426 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4427 mask &= ~CPSR_Q; /* V5TE in reality */
d614a513
PM
4428 }
4429 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4430 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4431 }
4432 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4433 mask &= ~CPSR_IT;
d614a513 4434 }
4051e12c
PM
4435 /* Mask out execution state and reserved bits. */
4436 if (!spsr) {
4437 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4438 }
b5ff1b31
FB
4439 /* Mask out privileged bits. */
4440 if (IS_USER(s))
9ee6e8bb 4441 mask &= CPSR_USER;
b5ff1b31
FB
4442 return mask;
4443}
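/* Example: an MSR with field mask 0b1001 requests PSR[31:24] and
 * PSR[7:0], so mask starts as 0xff0000ff before the feature and
 * privilege filtering above reduces it (down to the CPSR_USER bits
 * when executing in user mode).
 */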
4444
2fbac54b 4445/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4446static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4447{
39d5492a 4448 TCGv_i32 tmp;
b5ff1b31
FB
4449 if (spsr) {
4450 /* ??? This is also undefined in system mode. */
4451 if (IS_USER(s))
4452 return 1;
d9ba4830
PB
4453
4454 tmp = load_cpu_field(spsr);
4455 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4456 tcg_gen_andi_i32(t0, t0, mask);
4457 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4458 store_cpu_field(tmp, spsr);
b5ff1b31 4459 } else {
2fbac54b 4460 gen_set_cpsr(t0, mask);
b5ff1b31 4461 }
7d1b0095 4462 tcg_temp_free_i32(t0);
b5ff1b31
FB
4463 gen_lookup_tb(s);
4464 return 0;
4465}
4466
2fbac54b
FN
4467/* Returns nonzero if access to the PSR is not permitted. */
4468static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4469{
39d5492a 4470 TCGv_i32 tmp;
7d1b0095 4471 tmp = tcg_temp_new_i32();
2fbac54b
FN
4472 tcg_gen_movi_i32(tmp, val);
4473 return gen_set_psr(s, mask, spsr, tmp);
4474}
4475
8bfd0550
PM
4476static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4477 int *tgtmode, int *regno)
4478{
4479 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4480 * the target mode and register number, and identify the various
4481 * unpredictable cases.
4482 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4483 * + executed in user mode
4484 * + using R15 as the src/dest register
4485 * + accessing an unimplemented register
4486 * + accessing a register that's inaccessible at current PL/security state*
4487 * + accessing a register that you could access with a different insn
4488 * We choose to UNDEF in all these cases.
4489 * Since we don't know which of the various AArch32 modes we are in
4490 * we have to defer some checks to runtime.
4491 * Accesses to Monitor mode registers from Secure EL1 (which implies
4492 * that EL3 is AArch64) must trap to EL3.
4493 *
4494 * If the access checks fail this function will emit code to take
4495 * an exception and return false. Otherwise it will return true,
4496 * and set *tgtmode and *regno appropriately.
4497 */
4498 int exc_target = default_exception_el(s);
4499
4500 /* These instructions are present only in ARMv8, or in ARMv7 with the
4501 * Virtualization Extensions.
4502 */
4503 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4504 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4505 goto undef;
4506 }
4507
4508 if (IS_USER(s) || rn == 15) {
4509 goto undef;
4510 }
4511
4512 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4513 * of registers into (r, sysm).
4514 */
4515 if (r) {
4516 /* SPSRs for other modes */
4517 switch (sysm) {
4518 case 0xe: /* SPSR_fiq */
4519 *tgtmode = ARM_CPU_MODE_FIQ;
4520 break;
4521 case 0x10: /* SPSR_irq */
4522 *tgtmode = ARM_CPU_MODE_IRQ;
4523 break;
4524 case 0x12: /* SPSR_svc */
4525 *tgtmode = ARM_CPU_MODE_SVC;
4526 break;
4527 case 0x14: /* SPSR_abt */
4528 *tgtmode = ARM_CPU_MODE_ABT;
4529 break;
4530 case 0x16: /* SPSR_und */
4531 *tgtmode = ARM_CPU_MODE_UND;
4532 break;
4533 case 0x1c: /* SPSR_mon */
4534 *tgtmode = ARM_CPU_MODE_MON;
4535 break;
4536 case 0x1e: /* SPSR_hyp */
4537 *tgtmode = ARM_CPU_MODE_HYP;
4538 break;
4539 default: /* unallocated */
4540 goto undef;
4541 }
4542 /* We arbitrarily assign SPSR a register number of 16. */
4543 *regno = 16;
4544 } else {
4545 /* general purpose registers for other modes */
4546 switch (sysm) {
4547 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4548 *tgtmode = ARM_CPU_MODE_USR;
4549 *regno = sysm + 8;
4550 break;
4551 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4552 *tgtmode = ARM_CPU_MODE_FIQ;
4553 *regno = sysm;
4554 break;
4555 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4556 *tgtmode = ARM_CPU_MODE_IRQ;
4557 *regno = sysm & 1 ? 13 : 14;
4558 break;
4559 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4560 *tgtmode = ARM_CPU_MODE_SVC;
4561 *regno = sysm & 1 ? 13 : 14;
4562 break;
4563 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4564 *tgtmode = ARM_CPU_MODE_ABT;
4565 *regno = sysm & 1 ? 13 : 14;
4566 break;
4567 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4568 *tgtmode = ARM_CPU_MODE_UND;
4569 *regno = sysm & 1 ? 13 : 14;
4570 break;
4571 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4572 *tgtmode = ARM_CPU_MODE_MON;
4573 *regno = sysm & 1 ? 13 : 14;
4574 break;
4575 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4576 *tgtmode = ARM_CPU_MODE_HYP;
4577 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4578 *regno = sysm & 1 ? 13 : 17;
4579 break;
4580 default: /* unallocated */
4581 goto undef;
4582 }
4583 }
4584
4585 /* Catch the 'accessing inaccessible register' cases we can detect
4586 * at translate time.
4587 */
4588 switch (*tgtmode) {
4589 case ARM_CPU_MODE_MON:
4590 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4591 goto undef;
4592 }
4593 if (s->current_el == 1) {
4594 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4595 * then accesses to Mon registers trap to EL3
4596 */
4597 exc_target = 3;
4598 goto undef;
4599 }
4600 break;
4601 case ARM_CPU_MODE_HYP:
aec4dd09
PM
4602 /*
4603 * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
4604 * (and so we can forbid accesses from EL2 or below). elr_hyp
4605 * can be accessed also from Hyp mode, so forbid accesses from
4606 * EL0 or EL1.
8bfd0550 4607 */
aec4dd09
PM
4608 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
4609 (s->current_el < 3 && *regno != 17)) {
8bfd0550
PM
4610 goto undef;
4611 }
4612 break;
4613 default:
4614 break;
4615 }
4616
4617 return true;
4618
4619undef:
4620 /* If we get here then some access check did not pass */
4621 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4622 return false;
4623}
4624
4625static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4626{
4627 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4628 int tgtmode = 0, regno = 0;
4629
4630 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4631 return;
4632 }
4633
4634 /* Sync state because msr_banked() can raise exceptions */
4635 gen_set_condexec(s);
4636 gen_set_pc_im(s, s->pc - 4);
4637 tcg_reg = load_reg(s, rn);
4638 tcg_tgtmode = tcg_const_i32(tgtmode);
4639 tcg_regno = tcg_const_i32(regno);
4640 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4641 tcg_temp_free_i32(tcg_tgtmode);
4642 tcg_temp_free_i32(tcg_regno);
4643 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4644 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4645}
4646
4647static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4648{
4649 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4650 int tgtmode = 0, regno = 0;
4651
4652 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4653 return;
4654 }
4655
4656 /* Sync state because mrs_banked() can raise exceptions */
4657 gen_set_condexec(s);
4658 gen_set_pc_im(s, s->pc - 4);
4659 tcg_reg = tcg_temp_new_i32();
4660 tcg_tgtmode = tcg_const_i32(tgtmode);
4661 tcg_regno = tcg_const_i32(regno);
4662 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4663 tcg_temp_free_i32(tcg_tgtmode);
4664 tcg_temp_free_i32(tcg_regno);
4665 store_reg(s, rn, tcg_reg);
dcba3a8d 4666 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4667}
4668
fb0e8e79
PM
4669/* Store value to PC as for an exception return (i.e. don't
4670 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4671 * will do the masking based on the new value of the Thumb bit.
4672 */
4673static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4674{
fb0e8e79
PM
4675 tcg_gen_mov_i32(cpu_R[15], pc);
4676 tcg_temp_free_i32(pc);
b5ff1b31
FB
4677}
4678
b0109805 4679/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4680static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4681{
fb0e8e79
PM
4682 store_pc_exc_ret(s, pc);
4683 /* The cpsr_write_eret helper will mask the low bits of PC
4684 * appropriately depending on the new Thumb bit, so it must
4685 * be called after storing the new PC.
4686 */
e69ad9df
AL
4687 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4688 gen_io_start();
4689 }
235ea1f5 4690 gen_helper_cpsr_write_eret(cpu_env, cpsr);
e69ad9df
AL
4691 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
4692 gen_io_end();
4693 }
7d1b0095 4694 tcg_temp_free_i32(cpsr);
b29fd33d 4695 /* Must exit loop to check unmasked IRQs */
dcba3a8d 4696 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb 4697}
3b46e624 4698
fb0e8e79
PM
4699/* Generate an old-style exception return. Marks pc as dead. */
4700static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4701{
4702 gen_rfe(s, pc, load_cpu_field(spsr));
4703}
4704
c22edfeb
AB
4705/*
4706 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4707 * only call the helper when running single-threaded TCG code to ensure
4708 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4709 * just skip this instruction. Currently the SEV/SEVL instructions
4710 * which are *one* of many ways to wake the CPU from WFE are not
4711 * implemented so we can't sleep like WFI does.
4712 */
9ee6e8bb
PB
4713static void gen_nop_hint(DisasContext *s, int val)
4714{
4715 switch (val) {
2399d4e7
EC
4716 /* When running in MTTCG we don't generate jumps to the yield and
4717 * WFE helpers as it won't affect the scheduling of other vCPUs.
4718 * If we wanted to more completely model WFE/SEV so we don't busy
4719 * spin unnecessarily we would need to do something more involved.
4720 */
c87e5a61 4721 case 1: /* yield */
2399d4e7 4722 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4723 gen_set_pc_im(s, s->pc);
dcba3a8d 4724 s->base.is_jmp = DISAS_YIELD;
c22edfeb 4725 }
c87e5a61 4726 break;
9ee6e8bb 4727 case 3: /* wfi */
eaed129d 4728 gen_set_pc_im(s, s->pc);
dcba3a8d 4729 s->base.is_jmp = DISAS_WFI;
9ee6e8bb
PB
4730 break;
4731 case 2: /* wfe */
2399d4e7 4732 if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
c22edfeb 4733 gen_set_pc_im(s, s->pc);
dcba3a8d 4734 s->base.is_jmp = DISAS_WFE;
c22edfeb 4735 }
72c1d3af 4736 break;
9ee6e8bb 4737 case 4: /* sev */
12b10571
MR
4738 case 5: /* sevl */
4739 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4740 default: /* nop */
4741 break;
4742 }
4743}
99c475ab 4744
ad69471c 4745#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4746
39d5492a 4747static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4748{
4749 switch (size) {
dd8fbd78
FN
4750 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4751 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4752 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4753 default: abort();
9ee6e8bb 4754 }
9ee6e8bb
PB
4755}
4756
39d5492a 4757static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4758{
4759 switch (size) {
dd8fbd78
FN
4760 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4761 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4762 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4763 default: return;
4764 }
4765}
4766
4767/* 32-bit pairwise ops end up the same as the elementwise versions. */
9ecd3c5c
RH
4768#define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
4769#define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
4770#define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
4771#define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
ad69471c 4772
ad69471c
PB
4773#define GEN_NEON_INTEGER_OP_ENV(name) do { \
4774 switch ((size << 1) | u) { \
4775 case 0: \
dd8fbd78 4776 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4777 break; \
4778 case 1: \
dd8fbd78 4779 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4780 break; \
4781 case 2: \
dd8fbd78 4782 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4783 break; \
4784 case 3: \
dd8fbd78 4785 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4786 break; \
4787 case 4: \
dd8fbd78 4788 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4789 break; \
4790 case 5: \
dd8fbd78 4791 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4792 break; \
4793 default: return 1; \
4794 }} while (0)
9ee6e8bb
PB
4795
4796#define GEN_NEON_INTEGER_OP(name) do { \
4797 switch ((size << 1) | u) { \
ad69471c 4798 case 0: \
dd8fbd78 4799 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4800 break; \
4801 case 1: \
dd8fbd78 4802 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4803 break; \
4804 case 2: \
dd8fbd78 4805 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4806 break; \
4807 case 3: \
dd8fbd78 4808 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4809 break; \
4810 case 4: \
dd8fbd78 4811 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4812 break; \
4813 case 5: \
dd8fbd78 4814 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4815 break; \
9ee6e8bb
PB
4816 default: return 1; \
4817 }} while (0)
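/* The switch index (size << 1) | u packs element size and signedness:
 * e.g. size = 1, u = 0 selects case 2, so GEN_NEON_INTEGER_OP(shl)
 * expands to gen_helper_neon_shl_s16(tmp, tmp, tmp2).
 */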
4818
39d5492a 4819static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4820{
39d5492a 4821 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4822 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4823 return tmp;
9ee6e8bb
PB
4824}
4825
39d5492a 4826static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4827{
dd8fbd78 4828 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4829 tcg_temp_free_i32(var);
9ee6e8bb
PB
4830}
4831
39d5492a 4832static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4833{
39d5492a 4834 TCGv_i32 tmp;
9ee6e8bb 4835 if (size == 1) {
0fad6efc
PM
4836 tmp = neon_load_reg(reg & 7, reg >> 4);
4837 if (reg & 8) {
dd8fbd78 4838 gen_neon_dup_high16(tmp);
0fad6efc
PM
4839 } else {
4840 gen_neon_dup_low16(tmp);
dd8fbd78 4841 }
0fad6efc
PM
4842 } else {
4843 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4844 }
dd8fbd78 4845 return tmp;
9ee6e8bb
PB
4846}
4847
02acedf9 4848static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4849{
b13708bb
RH
4850 TCGv_ptr pd, pm;
4851
600b828c 4852 if (!q && size == 2) {
02acedf9
PM
4853 return 1;
4854 }
b13708bb
RH
4855 pd = vfp_reg_ptr(true, rd);
4856 pm = vfp_reg_ptr(true, rm);
02acedf9
PM
4857 if (q) {
4858 switch (size) {
4859 case 0:
b13708bb 4860 gen_helper_neon_qunzip8(pd, pm);
02acedf9
PM
4861 break;
4862 case 1:
b13708bb 4863 gen_helper_neon_qunzip16(pd, pm);
02acedf9
PM
4864 break;
4865 case 2:
b13708bb 4866 gen_helper_neon_qunzip32(pd, pm);
02acedf9
PM
4867 break;
4868 default:
4869 abort();
4870 }
4871 } else {
4872 switch (size) {
4873 case 0:
b13708bb 4874 gen_helper_neon_unzip8(pd, pm);
02acedf9
PM
4875 break;
4876 case 1:
b13708bb 4877 gen_helper_neon_unzip16(pd, pm);
02acedf9
PM
4878 break;
4879 default:
4880 abort();
4881 }
4882 }
b13708bb
RH
4883 tcg_temp_free_ptr(pd);
4884 tcg_temp_free_ptr(pm);
02acedf9 4885 return 0;
19457615
FN
4886}
4887
d68a6f3a 4888static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4889{
b13708bb
RH
4890 TCGv_ptr pd, pm;
4891
600b828c 4892 if (!q && size == 2) {
d68a6f3a
PM
4893 return 1;
4894 }
b13708bb
RH
4895 pd = vfp_reg_ptr(true, rd);
4896 pm = vfp_reg_ptr(true, rm);
d68a6f3a
PM
4897 if (q) {
4898 switch (size) {
4899 case 0:
b13708bb 4900 gen_helper_neon_qzip8(pd, pm);
d68a6f3a
PM
4901 break;
4902 case 1:
b13708bb 4903 gen_helper_neon_qzip16(pd, pm);
d68a6f3a
PM
4904 break;
4905 case 2:
b13708bb 4906 gen_helper_neon_qzip32(pd, pm);
d68a6f3a
PM
4907 break;
4908 default:
4909 abort();
4910 }
4911 } else {
4912 switch (size) {
4913 case 0:
b13708bb 4914 gen_helper_neon_zip8(pd, pm);
d68a6f3a
PM
4915 break;
4916 case 1:
b13708bb 4917 gen_helper_neon_zip16(pd, pm);
d68a6f3a
PM
4918 break;
4919 default:
4920 abort();
4921 }
4922 }
b13708bb
RH
4923 tcg_temp_free_ptr(pd);
4924 tcg_temp_free_ptr(pm);
d68a6f3a 4925 return 0;
19457615
FN
4926}
4927
39d5492a 4928static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4929{
39d5492a 4930 TCGv_i32 rd, tmp;
19457615 4931
7d1b0095
PM
4932 rd = tcg_temp_new_i32();
4933 tmp = tcg_temp_new_i32();
19457615
FN
4934
4935 tcg_gen_shli_i32(rd, t0, 8);
4936 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4937 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4938 tcg_gen_or_i32(rd, rd, tmp);
4939
4940 tcg_gen_shri_i32(t1, t1, 8);
4941 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4942 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4943 tcg_gen_or_i32(t1, t1, tmp);
4944 tcg_gen_mov_i32(t0, rd);
4945
7d1b0095
PM
4946 tcg_temp_free_i32(tmp);
4947 tcg_temp_free_i32(rd);
19457615
FN
4948}
4949
39d5492a 4950static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4951{
39d5492a 4952 TCGv_i32 rd, tmp;
19457615 4953
7d1b0095
PM
4954 rd = tcg_temp_new_i32();
4955 tmp = tcg_temp_new_i32();
19457615
FN
4956
4957 tcg_gen_shli_i32(rd, t0, 16);
4958 tcg_gen_andi_i32(tmp, t1, 0xffff);
4959 tcg_gen_or_i32(rd, rd, tmp);
4960 tcg_gen_shri_i32(t1, t1, 16);
4961 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4962 tcg_gen_or_i32(t1, t1, tmp);
4963 tcg_gen_mov_i32(t0, rd);
4964
7d1b0095
PM
4965 tcg_temp_free_i32(tmp);
4966 tcg_temp_free_i32(rd);
19457615
FN
4967}
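/* Worked example for gen_neon_trn_u8: t0 = 0x44332211, t1 = 0x88776655
 * yields t0 = 0x33771155 and t1 = 0x44882266: the even-numbered bytes
 * of the two inputs end up gathered in t0 and the odd-numbered bytes
 * in t1, which is the 8-bit transpose step.
 */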
4968
4969
9ee6e8bb
PB
4970static struct {
4971 int nregs;
4972 int interleave;
4973 int spacing;
308e5636 4974} const neon_ls_element_type[11] = {
ac55d007
RH
4975 {1, 4, 1},
4976 {1, 4, 2},
9ee6e8bb 4977 {4, 1, 1},
ac55d007
RH
4978 {2, 2, 2},
4979 {1, 3, 1},
4980 {1, 3, 2},
9ee6e8bb
PB
4981 {3, 1, 1},
4982 {1, 1, 1},
ac55d007
RH
4983 {1, 2, 1},
4984 {1, 2, 2},
9ee6e8bb
PB
4985 {2, 1, 1}
4986};
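/* Example: op == 7 (VLD1/VST1 with a single register) gives {1, 1, 1}:
 * one register, no interleaving, unit spacing, so the code below
 * computes stride = 1 * 1 * 8 = 8 bytes of writeback.
 */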
4987
4988/* Translate a NEON load/store element instruction. Return nonzero if the
4989 instruction is invalid. */
7dcc1f89 4990static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4991{
4992 int rd, rn, rm;
4993 int op;
4994 int nregs;
4995 int interleave;
84496233 4996 int spacing;
9ee6e8bb
PB
4997 int stride;
4998 int size;
4999 int reg;
9ee6e8bb 5000 int load;
9ee6e8bb 5001 int n;
7377c2c9 5002 int vec_size;
ac55d007
RH
5003 int mmu_idx;
5004 TCGMemOp endian;
39d5492a
PM
5005 TCGv_i32 addr;
5006 TCGv_i32 tmp;
5007 TCGv_i32 tmp2;
84496233 5008 TCGv_i64 tmp64;
9ee6e8bb 5009
2c7ffc41
PM
5010 /* FIXME: this access check should not take precedence over UNDEF
5011 * for invalid encodings; we will generate incorrect syndrome information
5012 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5013 */
9dbbc748 5014 if (s->fp_excp_el) {
2c7ffc41 5015 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 5016 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5017 return 0;
5018 }
5019
5df8bac1 5020 if (!s->vfp_enabled)
9ee6e8bb
PB
5021 return 1;
5022 VFP_DREG_D(rd, insn);
5023 rn = (insn >> 16) & 0xf;
5024 rm = insn & 0xf;
5025 load = (insn & (1 << 21)) != 0;
ac55d007
RH
5026 endian = s->be_data;
5027 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
5028 if ((insn & (1 << 23)) == 0) {
5029 /* Load store all elements. */
5030 op = (insn >> 8) & 0xf;
5031 size = (insn >> 6) & 3;
84496233 5032 if (op > 10)
9ee6e8bb 5033 return 1;
f2dd89d0
PM
5034 /* Catch UNDEF cases for bad values of align field */
5035 switch (op & 0xc) {
5036 case 4:
5037 if (((insn >> 5) & 1) == 1) {
5038 return 1;
5039 }
5040 break;
5041 case 8:
5042 if (((insn >> 4) & 3) == 3) {
5043 return 1;
5044 }
5045 break;
5046 default:
5047 break;
5048 }
9ee6e8bb
PB
5049 nregs = neon_ls_element_type[op].nregs;
5050 interleave = neon_ls_element_type[op].interleave;
84496233 5051 spacing = neon_ls_element_type[op].spacing;
ac55d007 5052 if (size == 3 && (interleave | spacing) != 1) {
84496233 5053 return 1;
ac55d007 5054 }
e23f12b3
RH
5055 /* For our purposes, bytes are always little-endian. */
5056 if (size == 0) {
5057 endian = MO_LE;
5058 }
5059 /* Consecutive little-endian elements from a single register
5060 * can be promoted to a larger little-endian operation.
5061 */
5062 if (interleave == 1 && endian == MO_LE) {
5063 size = 3;
5064 }
ac55d007 5065 tmp64 = tcg_temp_new_i64();
e318a60b 5066 addr = tcg_temp_new_i32();
ac55d007 5067 tmp2 = tcg_const_i32(1 << size);
dcc65026 5068 load_reg_var(s, addr, rn);
9ee6e8bb 5069 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
5070 for (n = 0; n < 8 >> size; n++) {
5071 int xs;
5072 for (xs = 0; xs < interleave; xs++) {
5073 int tt = rd + reg + spacing * xs;
5074
5075 if (load) {
5076 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5077 neon_store_element64(tt, n, size, tmp64);
5078 } else {
5079 neon_load_element64(tmp64, tt, n, size);
5080 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 5081 }
ac55d007 5082 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
5083 }
5084 }
9ee6e8bb 5085 }
e318a60b 5086 tcg_temp_free_i32(addr);
ac55d007
RH
5087 tcg_temp_free_i32(tmp2);
5088 tcg_temp_free_i64(tmp64);
5089 stride = nregs * interleave * 8;
9ee6e8bb
PB
5090 } else {
5091 size = (insn >> 10) & 3;
5092 if (size == 3) {
5093 /* Load single element to all lanes. */
8e18cde3
PM
5094 int a = (insn >> 4) & 1;
5095 if (!load) {
9ee6e8bb 5096 return 1;
8e18cde3 5097 }
9ee6e8bb
PB
5098 size = (insn >> 6) & 3;
5099 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5100
5101 if (size == 3) {
5102 if (nregs != 4 || a == 0) {
9ee6e8bb 5103 return 1;
99c475ab 5104 }
8e18cde3
PM
5105 /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
5106 size = 2;
5107 }
5108 if (nregs == 1 && a == 1 && size == 0) {
5109 return 1;
5110 }
5111 if (nregs == 3 && a == 1) {
5112 return 1;
5113 }
e318a60b 5114 addr = tcg_temp_new_i32();
8e18cde3 5115 load_reg_var(s, addr, rn);
7377c2c9
RH
5116
5117 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5118 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5119 */
5120 stride = (insn & (1 << 5)) ? 2 : 1;
5121 vec_size = nregs == 1 ? stride * 8 : 8;
5122
5123 tmp = tcg_temp_new_i32();
5124 for (reg = 0; reg < nregs; reg++) {
5125 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5126 s->be_data | size);
5127 if ((rd & 1) && vec_size == 16) {
5128 /* We cannot write 16 bytes at once because the
5129 * destination is unaligned.
5130 */
5131 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5132 8, 8, tmp);
5133 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5134 neon_reg_offset(rd, 0), 8, 8);
5135 } else {
5136 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5137 vec_size, vec_size, tmp);
8e18cde3 5138 }
7377c2c9
RH
5139 tcg_gen_addi_i32(addr, addr, 1 << size);
5140 rd += stride;
9ee6e8bb 5141 }
7377c2c9 5142 tcg_temp_free_i32(tmp);
e318a60b 5143 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5144 stride = (1 << size) * nregs;
5145 } else {
5146 /* Single element. */
93262b16 5147 int idx = (insn >> 4) & 0xf;
2d6ac920 5148 int reg_idx;
9ee6e8bb
PB
5149 switch (size) {
5150 case 0:
2d6ac920 5151 reg_idx = (insn >> 5) & 7;
9ee6e8bb
PB
5152 stride = 1;
5153 break;
5154 case 1:
2d6ac920 5155 reg_idx = (insn >> 6) & 3;
9ee6e8bb
PB
5156 stride = (insn & (1 << 5)) ? 2 : 1;
5157 break;
5158 case 2:
2d6ac920 5159 reg_idx = (insn >> 7) & 1;
9ee6e8bb
PB
5160 stride = (insn & (1 << 6)) ? 2 : 1;
5161 break;
5162 default:
5163 abort();
5164 }
5165 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5166 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5167 switch (nregs) {
5168 case 1:
5169 if (((idx & (1 << size)) != 0) ||
5170 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5171 return 1;
5172 }
5173 break;
5174 case 3:
5175 if ((idx & 1) != 0) {
5176 return 1;
5177 }
5178 /* fall through */
5179 case 2:
5180 if (size == 2 && (idx & 2) != 0) {
5181 return 1;
5182 }
5183 break;
5184 case 4:
5185 if ((size == 2) && ((idx & 3) == 3)) {
5186 return 1;
5187 }
5188 break;
5189 default:
5190 abort();
5191 }
5192 if ((rd + stride * (nregs - 1)) > 31) {
5193 /* Attempts to write off the end of the register file
5194 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5195 * the neon_load_reg() would write off the end of the array.
5196 */
5197 return 1;
5198 }
2d6ac920 5199 tmp = tcg_temp_new_i32();
e318a60b 5200 addr = tcg_temp_new_i32();
dcc65026 5201 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5202 for (reg = 0; reg < nregs; reg++) {
5203 if (load) {
2d6ac920
RH
5204 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5205 s->be_data | size);
5206 neon_store_element(rd, reg_idx, size, tmp);
9ee6e8bb 5207 } else { /* Store */
2d6ac920
RH
5208 neon_load_element(tmp, rd, reg_idx, size);
5209 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5210 s->be_data | size);
99c475ab 5211 }
9ee6e8bb 5212 rd += stride;
1b2b1e54 5213 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5214 }
e318a60b 5215 tcg_temp_free_i32(addr);
2d6ac920 5216 tcg_temp_free_i32(tmp);
9ee6e8bb 5217 stride = nregs * (1 << size);
99c475ab 5218 }
9ee6e8bb
PB
5219 }
5220 if (rm != 15) {
39d5492a 5221 TCGv_i32 base;
b26eefb6
PB
5222
5223 base = load_reg(s, rn);
9ee6e8bb 5224 if (rm == 13) {
b26eefb6 5225 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5226 } else {
39d5492a 5227 TCGv_i32 index;
b26eefb6
PB
5228 index = load_reg(s, rm);
5229 tcg_gen_add_i32(base, base, index);
7d1b0095 5230 tcg_temp_free_i32(index);
9ee6e8bb 5231 }
b26eefb6 5232 store_reg(s, rn, base);
9ee6e8bb
PB
5233 }
5234 return 0;
5235}
3b46e624 5236
39d5492a 5237static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5238{
5239 switch (size) {
5240 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5241 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5242 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5243 default: abort();
5244 }
5245}
5246
39d5492a 5247static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5248{
5249 switch (size) {
02da0b2d
PM
5250 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5251 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5252 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5253 default: abort();
5254 }
5255}
5256
39d5492a 5257static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5258{
5259 switch (size) {
02da0b2d
PM
5260 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5261 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5262 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5263 default: abort();
5264 }
5265}
5266
39d5492a 5267static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5268{
5269 switch (size) {
02da0b2d
PM
5270 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5271 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5272 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5273 default: abort();
5274 }
5275}
5276
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}

static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}

static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}

static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}

static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

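/* Worked example of the index used above: (size << 1) | u packs the
 * lane size and signedness into one switch value, so size = 0, u = 0
 * selects case 0 (the signed 8-bit widening multiply helper) and
 * size = 2, u = 1 selects case 5 (the unsigned 32x32->64 multiply via
 * gen_mulu_i64_i32).
 */
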
static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

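/* Example of how the table above is consumed: the decoder tests
 * neon_3r_sizes[op] & (1 << size) and UNDEFs on zero, so e.g.
 * [NEON_3R_VQDMULH_VQRDMULH] = 0x6 permits only size 1 (16-bit) and
 * size 2 (32-bit) elements, and an unallocated op value, whose entry
 * is implicitly zero, always UNDEFs.
 */
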
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

/* Expand v8.1 simd helper. */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

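/* In do_v81_helper above, opr_sz = (1 + q) * 8 is the operand size in
 * bytes: 8 for a Q=0 (64-bit, D-register) operation and 16 for a Q=1
 * (128-bit, Q-register) one; it is passed to the gvec expander twice,
 * as both the operation size and the maximum size to clear.
 */
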
/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}

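/* The three ops above compute the bitwise-select identity
 * rd = (rn & rd) | (rm & ~rd) as rm ^ ((rn ^ rm) & rd): where a bit of
 * rd (the select mask for VBSL) is 1 the xor cancels rm and the rn bit
 * is taken, and where it is 0 the rm bit is kept. The same rewrite,
 * with the role of the mask operand permuted, underlies the gen_bit_*
 * and gen_bif_* expanders below.
 */
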
static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_64 },
};

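/* A note on the table above: the 8- and 16-bit rows use .fni8 with the
 * tcg_gen_vec_*_i64 helpers, which operate on several narrow lanes
 * packed into one 64-bit value, while the 32-bit row can use a plain
 * .fni4 scalar expansion; the .fniv vector form is only usable when
 * the host backend supports the listed .opc. The usra_op table below
 * follows the same layout.
 */
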
static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64, },
};

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

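/* Worked example of the mask above: for MO_8 with shift = 3, dup_const
 * replicates 0xff >> 3 = 0x1f into every byte, giving mask =
 * 0x1f1f1f1f1f1f1f1f. Each destination byte then keeps its top 3 bits
 * (d & ~mask) and receives the 5 shifted-in source bits (t & mask),
 * which is exactly the SRI shift-and-insert semantics.
 * gen_shr16_ins_i64 below is the same construction for 16-bit lanes.
 */
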
static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64 },
};

static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_64 },
};

static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

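/* In gen_cmtst_i32 above, setcond produces 0 or 1 and the negation
 * turns the 1 into all-ones (-1), giving the 0/~0 per-lane boolean
 * that the Neon compare instructions define. gen_cmtst_i64 below uses
 * the same pattern, while the vector form can rely on tcg_gen_cmp_vec
 * directly, since vector compares already produce all-ones lanes.
 */
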
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};

static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_usadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

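/* Saturation detection in gen_uqadd_vec above: x holds the plain
 * wrapping add and t the saturating add; any lane where the two differ
 * must have saturated, so the TCG_COND_NE compare yields all-ones in
 * exactly those lanes, which are then ORed into the QC (saturation)
 * flag vector. The sqadd/uqsub/sqsub expanders below repeat the same
 * scheme for the other signedness/operation combinations.
 */
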
const GVecGen4 uqadd_op[4] = {
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_b,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_h,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_s,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqadd_vec,
      .fno = gen_helper_gvec_uqadd_d,
      .opc = INDEX_op_usadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_add_vec(vece, x, a, b);
    tcg_gen_ssadd_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqadd_op[4] = {
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_b,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_h,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_s,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqadd_vec,
      .fno = gen_helper_gvec_sqadd_d,
      .opc = INDEX_op_ssadd_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_ussub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 uqsub_op[4] = {
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_b,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_h,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_s,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_uqsub_vec,
      .fno = gen_helper_gvec_uqsub_d,
      .opc = INDEX_op_ussub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
                          TCGv_vec a, TCGv_vec b)
{
    TCGv_vec x = tcg_temp_new_vec_matching(t);
    tcg_gen_sub_vec(vece, x, a, b);
    tcg_gen_sssub_vec(vece, t, a, b);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
    tcg_gen_or_vec(vece, sat, sat, x);
    tcg_temp_free_vec(x);
}

const GVecGen4 sqsub_op[4] = {
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_b,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_8 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_h,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_16 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_s,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_32 },
    { .fniv = gen_sqsub_vec,
      .fno = gen_helper_gvec_sqsub_d,
      .opc = INDEX_op_sssub_vec,
      .write_aofs = true,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

    if ((insn & (1 << 23)) == 0) {
        /* Three register same length.  */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops.  */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2: /* VORR */
                tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                vec_size, vec_size);
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VQADD:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqadd_op : sqadd_op) + size);
            break;

        case NEON_3R_VQSUB:
            tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                           rn_ofs, rm_ofs, vec_size, vec_size,
                           (u ? uqsub_op : sqsub_op) + size);
            break;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below.  */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VMAX:
            if (u) {
                tcg_gen_gvec_umax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smax(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        case NEON_3R_VMIN:
            if (u) {
                tcg_gen_gvec_umin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            } else {
                tcg_gen_gvec_smin(size, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
            }
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions.  */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed.  */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise.  */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise.  */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated.  */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high.  */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic.  */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result.  For elementwise operations we can put it
               straight into the destination register.  For pairwise operations
               we have to be careful to avoid clobbering the source operands.  */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations.  */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift.  */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift.  */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU.  */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits.  */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }
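                /* Worked example of the rebias above: for size == 2 the
                 * lanes are N = 32 bits wide, so an encoded field value
                 * of 24 becomes 24 - 32 = -8, i.e. a right shift by 8;
                 * the VSHR/VSRA/VSRI cases below negate it back to a
                 * positive shift count.
                 */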
6891
6892 switch (op) {
6893 case 0: /* VSHR */
6894 /* Right shift comes here negative. */
6895 shift = -shift;
6896 /* Shifts larger than the element size are architecturally
6897 * valid. Unsigned results in all zeros; signed results
6898 * in all sign bits.
6899 */
6900 if (!u) {
6901 tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
6902 MIN(shift, (8 << size) - 1),
6903 vec_size, vec_size);
6904 } else if (shift >= 8 << size) {
6905 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6906 } else {
6907 tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
6908 vec_size, vec_size);
6909 }
6910 return 0;
6911
41f6c113
RH
6912 case 1: /* VSRA */
6913 /* Right shift comes here negative. */
6914 shift = -shift;
6915 /* Shifts larger than the element size are architecturally
6916 * valid. Unsigned results in all zeros; signed results
6917 * in all sign bits.
6918 */
6919 if (!u) {
6920 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6921 MIN(shift, (8 << size) - 1),
6922 &ssra_op[size]);
6923 } else if (shift >= 8 << size) {
6924 /* rd += 0 */
6925 } else {
6926 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6927 shift, &usra_op[size]);
6928 }
6929 return 0;
6930
f3cd8218
RH
6931 case 4: /* VSRI */
6932 if (!u) {
6933 return 1;
6934 }
6935 /* Right shift comes here negative. */
6936 shift = -shift;
6937 /* Shift out of range leaves destination unchanged. */
6938 if (shift < 8 << size) {
6939 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
6940 shift, &sri_op[size]);
6941 }
6942 return 0;
6943
1dc8425e 6944 case 5: /* VSHL, VSLI */
f3cd8218
RH
6945 if (u) { /* VSLI */
6946 /* Shift out of range leaves destination unchanged. */
6947 if (shift < 8 << size) {
6948 tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
6949 vec_size, shift, &sli_op[size]);
6950 }
6951 } else { /* VSHL */
1dc8425e
RH
6952 /* Shifts larger than the element size are
6953 * architecturally valid and results in zero.
6954 */
6955 if (shift >= 8 << size) {
6956 tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
6957 } else {
6958 tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
6959 vec_size, vec_size);
6960 }
1dc8425e 6961 }
f3cd8218 6962 return 0;
1dc8425e
RH
6963 }
6964
9ee6e8bb
PB
6965 if (size == 3) {
6966 count = q + 1;
6967 } else {
6968 count = q ? 4: 2;
6969 }
1dc8425e
RH
6970
6971 /* To avoid excessive duplication of ops we implement shift
6972 * by immediate using the variable shift operations.
6973 */
6974 imm = dup_const(size, shift);
9ee6e8bb
PB
6975
6976 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6977 if (size == 3) {
6978 neon_load_reg64(cpu_V0, rm + pass);
6979 tcg_gen_movi_i64(cpu_V1, imm);
6980 switch (op) {
ad69471c
PB
6981 case 2: /* VRSHR */
6982 case 3: /* VRSRA */
6983 if (u)
6984 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6985 else
ad69471c 6986 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6987 break;
0322b26e 6988 case 6: /* VQSHLU */
02da0b2d
PM
6989 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6990 cpu_V0, cpu_V1);
ad69471c 6991 break;
0322b26e
PM
6992 case 7: /* VQSHL */
6993 if (u) {
02da0b2d 6994 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6995 cpu_V0, cpu_V1);
6996 } else {
02da0b2d 6997 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6998 cpu_V0, cpu_V1);
6999 }
9ee6e8bb 7000 break;
1dc8425e
RH
7001 default:
7002 g_assert_not_reached();
9ee6e8bb 7003 }
41f6c113 7004 if (op == 3) {
ad69471c 7005 /* Accumulate. */
5371cb81 7006 neon_load_reg64(cpu_V1, rd + pass);
ad69471c 7007 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
7008 }
7009 neon_store_reg64(cpu_V0, rd + pass);
7010 } else { /* size < 3 */
7011 /* Operands in T0 and T1. */
dd8fbd78 7012 tmp = neon_load_reg(rm, pass);
7d1b0095 7013 tmp2 = tcg_temp_new_i32();
dd8fbd78 7014 tcg_gen_movi_i32(tmp2, imm);
ad69471c 7015 switch (op) {
ad69471c
PB
7016 case 2: /* VRSHR */
7017 case 3: /* VRSRA */
7018 GEN_NEON_INTEGER_OP(rshl);
7019 break;
0322b26e 7020 case 6: /* VQSHLU */
ad69471c 7021 switch (size) {
0322b26e 7022 case 0:
02da0b2d
PM
7023 gen_helper_neon_qshlu_s8(tmp, cpu_env,
7024 tmp, tmp2);
0322b26e
PM
7025 break;
7026 case 1:
02da0b2d
PM
7027 gen_helper_neon_qshlu_s16(tmp, cpu_env,
7028 tmp, tmp2);
0322b26e
PM
7029 break;
7030 case 2:
02da0b2d
PM
7031 gen_helper_neon_qshlu_s32(tmp, cpu_env,
7032 tmp, tmp2);
0322b26e
PM
7033 break;
7034 default:
cc13115b 7035 abort();
ad69471c
PB
7036 }
7037 break;
0322b26e 7038 case 7: /* VQSHL */
02da0b2d 7039 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 7040 break;
1dc8425e
RH
7041 default:
7042 g_assert_not_reached();
ad69471c 7043 }
7d1b0095 7044 tcg_temp_free_i32(tmp2);
ad69471c 7045
41f6c113 7046 if (op == 3) {
ad69471c 7047 /* Accumulate. */
dd8fbd78 7048 tmp2 = neon_load_reg(rd, pass);
5371cb81 7049 gen_neon_add(size, tmp, tmp2);
7d1b0095 7050 tcg_temp_free_i32(tmp2);
ad69471c 7051 }
dd8fbd78 7052 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7053 }
7054 } /* for pass */
7055 } else if (op < 10) {
ad69471c 7056 /* Shift by immediate and narrow:
9ee6e8bb 7057 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 7058 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
7059 if (rm & 1) {
7060 return 1;
7061 }
9ee6e8bb
PB
7062 shift = shift - (1 << (size + 3));
7063 size++;
92cdfaeb 7064 if (size == 3) {
a7812ae4 7065 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
7066 neon_load_reg64(cpu_V0, rm);
7067 neon_load_reg64(cpu_V1, rm + 1);
7068 for (pass = 0; pass < 2; pass++) {
7069 TCGv_i64 in;
7070 if (pass == 0) {
7071 in = cpu_V0;
7072 } else {
7073 in = cpu_V1;
7074 }
ad69471c 7075 if (q) {
0b36f4cd 7076 if (input_unsigned) {
92cdfaeb 7077 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 7078 } else {
92cdfaeb 7079 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 7080 }
ad69471c 7081 } else {
0b36f4cd 7082 if (input_unsigned) {
92cdfaeb 7083 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 7084 } else {
92cdfaeb 7085 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 7086 }
ad69471c 7087 }
7d1b0095 7088 tmp = tcg_temp_new_i32();
92cdfaeb
PM
7089 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
7090 neon_store_reg(rd, pass, tmp);
7091 } /* for pass */
7092 tcg_temp_free_i64(tmp64);
7093 } else {
7094 if (size == 1) {
7095 imm = (uint16_t)shift;
7096 imm |= imm << 16;
2c0262af 7097 } else {
92cdfaeb
PM
7098 /* size == 2 */
7099 imm = (uint32_t)shift;
7100 }
7101 tmp2 = tcg_const_i32(imm);
7102 tmp4 = neon_load_reg(rm + 1, 0);
7103 tmp5 = neon_load_reg(rm + 1, 1);
7104 for (pass = 0; pass < 2; pass++) {
7105 if (pass == 0) {
7106 tmp = neon_load_reg(rm, 0);
7107 } else {
7108 tmp = tmp4;
7109 }
0b36f4cd
CL
7110 gen_neon_shift_narrow(size, tmp, tmp2, q,
7111 input_unsigned);
92cdfaeb
PM
7112 if (pass == 0) {
7113 tmp3 = neon_load_reg(rm, 1);
7114 } else {
7115 tmp3 = tmp5;
7116 }
0b36f4cd
CL
7117 gen_neon_shift_narrow(size, tmp3, tmp2, q,
7118 input_unsigned);
36aa55dc 7119 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
7120 tcg_temp_free_i32(tmp);
7121 tcg_temp_free_i32(tmp3);
7122 tmp = tcg_temp_new_i32();
92cdfaeb
PM
7123 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
7124 neon_store_reg(rd, pass, tmp);
7125 } /* for pass */
c6067f04 7126 tcg_temp_free_i32(tmp2);
b75263d6 7127 }
9ee6e8bb 7128 } else if (op == 10) {
cc13115b
PM
7129 /* VSHLL, VMOVL */
7130 if (q || (rd & 1)) {
9ee6e8bb 7131 return 1;
cc13115b 7132 }
ad69471c
PB
7133 tmp = neon_load_reg(rm, 0);
7134 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7135 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7136 if (pass == 1)
7137 tmp = tmp2;
7138
7139 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 7140
9ee6e8bb
PB
7141 if (shift != 0) {
7142 /* The shift is less than the width of the source
ad69471c
PB
7143 type, so we can just shift the whole register. */
7144 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
7145 /* Widen the result of shift: we need to clear
7146 * the potential overflow bits resulting from
7147 * left bits of the narrow input appearing as
7148 * right bits of left the neighbour narrow
7149 * input. */
ad69471c
PB
7150 if (size < 2 || !u) {
7151 uint64_t imm64;
7152 if (size == 0) {
7153 imm = (0xffu >> (8 - shift));
7154 imm |= imm << 16;
acdf01ef 7155 } else if (size == 1) {
ad69471c 7156 imm = 0xffff >> (16 - shift);
acdf01ef
CL
7157 } else {
7158 /* size == 2 */
7159 imm = 0xffffffff >> (32 - shift);
7160 }
7161 if (size < 2) {
7162 imm64 = imm | (((uint64_t)imm) << 32);
7163 } else {
7164 imm64 = imm;
9ee6e8bb 7165 }
acdf01ef 7166 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
7167 }
7168 }
ad69471c 7169 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7170 }
f73534a5 7171 } else if (op >= 14) {
9ee6e8bb 7172 /* VCVT fixed-point. */
cc13115b
PM
7173 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
7174 return 1;
7175 }
f73534a5
PM
7176 /* We have already masked out the must-be-1 top bit of imm6,
7177 * hence this 32-shift where the ARM ARM has 64-imm6.
7178 */
7179 shift = 32 - shift;
9ee6e8bb 7180 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 7181 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 7182 if (!(op & 1)) {
9ee6e8bb 7183 if (u)
5500b06c 7184 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 7185 else
5500b06c 7186 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
7187 } else {
7188 if (u)
5500b06c 7189 gen_vfp_toul(0, shift, 1);
9ee6e8bb 7190 else
5500b06c 7191 gen_vfp_tosl(0, shift, 1);
2c0262af 7192 }
4373f3ce 7193 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
7194 }
7195 } else {
9ee6e8bb
PB
7196 return 1;
7197 }
7198 } else { /* (insn & 0x00380080) == 0 */
246fa4ac
RH
7199 int invert, reg_ofs, vec_size;
7200
7d80fee5
PM
7201 if (q && (rd & 1)) {
7202 return 1;
7203 }
9ee6e8bb
PB
7204
7205 op = (insn >> 8) & 0xf;
7206 /* One register and immediate. */
7207 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
7208 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
7209 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
7210 * We choose to not special-case this and will behave as if a
7211 * valid constant encoding of 0 had been given.
7212 */
9ee6e8bb
PB
7213 switch (op) {
7214 case 0: case 1:
7215 /* no-op */
7216 break;
7217 case 2: case 3:
7218 imm <<= 8;
7219 break;
7220 case 4: case 5:
7221 imm <<= 16;
7222 break;
7223 case 6: case 7:
7224 imm <<= 24;
7225 break;
7226 case 8: case 9:
7227 imm |= imm << 16;
7228 break;
7229 case 10: case 11:
7230 imm = (imm << 8) | (imm << 24);
7231 break;
7232 case 12:
8e31209e 7233 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
7234 break;
7235 case 13:
7236 imm = (imm << 16) | 0xffff;
7237 break;
7238 case 14:
7239 imm |= (imm << 8) | (imm << 16) | (imm << 24);
246fa4ac 7240 if (invert) {
9ee6e8bb 7241 imm = ~imm;
246fa4ac 7242 }
9ee6e8bb
PB
7243 break;
7244 case 15:
7d80fee5
PM
7245 if (invert) {
7246 return 1;
7247 }
9ee6e8bb
PB
7248 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
7249 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
7250 break;
7251 }
246fa4ac 7252 if (invert) {
9ee6e8bb 7253 imm = ~imm;
246fa4ac 7254 }
9ee6e8bb 7255
246fa4ac
RH
7256 reg_ofs = neon_reg_offset(rd, 0);
7257 vec_size = q ? 16 : 8;
7258
7259 if (op & 1 && op < 12) {
7260 if (invert) {
7261 /* The immediate value has already been inverted,
7262 * so BIC becomes AND.
7263 */
7264 tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
7265 vec_size, vec_size);
9ee6e8bb 7266 } else {
246fa4ac
RH
7267 tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
7268 vec_size, vec_size);
7269 }
7270 } else {
7271 /* VMOV, VMVN. */
7272 if (op == 14 && invert) {
7273 TCGv_i64 t64 = tcg_temp_new_i64();
7274
7275 for (pass = 0; pass <= q; ++pass) {
7276 uint64_t val = 0;
a5a14945 7277 int n;
246fa4ac
RH
7278
7279 for (n = 0; n < 8; n++) {
7280 if (imm & (1 << (n + pass * 8))) {
7281 val |= 0xffull << (n * 8);
7282 }
9ee6e8bb 7283 }
246fa4ac
RH
7284 tcg_gen_movi_i64(t64, val);
7285 neon_store_reg64(t64, rd + pass);
9ee6e8bb 7286 }
246fa4ac
RH
7287 tcg_temp_free_i64(t64);
7288 } else {
7289 tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
9ee6e8bb
PB
7290 }
7291 }
7292 }
e4b3861d 7293 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
7294 if (size != 3) {
7295 op = (insn >> 8) & 0xf;
7296 if ((insn & (1 << 6)) == 0) {
7297 /* Three registers of different lengths. */
7298 int src1_wide;
7299 int src2_wide;
7300 int prewiden;
526d0096
PM
7301 /* undefreq: bit 0 : UNDEF if size == 0
7302 * bit 1 : UNDEF if size == 1
7303 * bit 2 : UNDEF if size == 2
7304 * bit 3 : UNDEF if U == 1
7305 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
7306 */
7307 int undefreq;
7308 /* prewiden, src1_wide, src2_wide, undefreq */
7309 static const int neon_3reg_wide[16][4] = {
7310 {1, 0, 0, 0}, /* VADDL */
7311 {1, 1, 0, 0}, /* VADDW */
7312 {1, 0, 0, 0}, /* VSUBL */
7313 {1, 1, 0, 0}, /* VSUBW */
7314 {0, 1, 1, 0}, /* VADDHN */
7315 {0, 0, 0, 0}, /* VABAL */
7316 {0, 1, 1, 0}, /* VSUBHN */
7317 {0, 0, 0, 0}, /* VABDL */
7318 {0, 0, 0, 0}, /* VMLAL */
526d0096 7319 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 7320 {0, 0, 0, 0}, /* VMLSL */
526d0096 7321 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 7322 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 7323 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 7324 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 7325 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
7326 };
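/* Editor's note on decoding this table: the VQDMLAL entry is 9,
 * i.e. bits 0 and 3 set, so it is UNDEF for size == 0 and for
 * U == 1 -- the signed saturating doubling ops have no byte-sized
 * or unsigned forms.  Entry 15's value 7 sets bits [2:0], the
 * "always UNDEF" pattern noted above.
 */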
7327
7328 prewiden = neon_3reg_wide[op][0];
7329 src1_wide = neon_3reg_wide[op][1];
7330 src2_wide = neon_3reg_wide[op][2];
695272dc 7331 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 7332
526d0096
PM
7333 if ((undefreq & (1 << size)) ||
7334 ((undefreq & 8) && u)) {
695272dc
PM
7335 return 1;
7336 }
7337 if ((src1_wide && (rn & 1)) ||
7338 (src2_wide && (rm & 1)) ||
7339 (!src2_wide && (rd & 1))) {
ad69471c 7340 return 1;
695272dc 7341 }
ad69471c 7342
4e624eda
PM
7343 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7344 * outside the loop below as it only performs a single pass.
7345 */
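/* Editor's note: polynomial multiplication is carry-less -- e.g.
 * 0b11 * 0b11 = 0b101, because the partial products 0b11 and 0b110
 * are XORed rather than added -- which is why dedicated helpers are
 * used here instead of an ordinary 64-bit multiply.
 */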
7346 if (op == 14 && size == 2) {
7347 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7348
962fcbf2 7349 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
7350 return 1;
7351 }
7352 tcg_rn = tcg_temp_new_i64();
7353 tcg_rm = tcg_temp_new_i64();
7354 tcg_rd = tcg_temp_new_i64();
7355 neon_load_reg64(tcg_rn, rn);
7356 neon_load_reg64(tcg_rm, rm);
7357 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7358 neon_store_reg64(tcg_rd, rd);
7359 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7360 neon_store_reg64(tcg_rd, rd + 1);
7361 tcg_temp_free_i64(tcg_rn);
7362 tcg_temp_free_i64(tcg_rm);
7363 tcg_temp_free_i64(tcg_rd);
7364 return 0;
7365 }
7366
9ee6e8bb
PB
7367 /* Avoid overlapping operands. Wide source operands are
7368 always aligned so will never overlap with wide
7369 destinations in problematic ways. */
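/* Editor's worked example (not in the original source): for
 * VADDL q0, d0, d1 we have rd == rn with a narrow first source, so
 * pass 0 writing the 64-bit result to d0 would clobber the half of
 * d0 that pass 1 still needs; stashing that half in a scratch slot
 * first avoids the overlap.
 */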
8f8e3aa4 7370 if (rd == rm && !src2_wide) {
dd8fbd78
FN
7371 tmp = neon_load_reg(rm, 1);
7372 neon_store_scratch(2, tmp);
8f8e3aa4 7373 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
7374 tmp = neon_load_reg(rn, 1);
7375 neon_store_scratch(2, tmp);
9ee6e8bb 7376 }
f764718d 7377 tmp3 = NULL;
9ee6e8bb 7378 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7379 if (src1_wide) {
7380 neon_load_reg64(cpu_V0, rn + pass);
f764718d 7381 tmp = NULL;
9ee6e8bb 7382 } else {
ad69471c 7383 if (pass == 1 && rd == rn) {
dd8fbd78 7384 tmp = neon_load_scratch(2);
9ee6e8bb 7385 } else {
ad69471c
PB
7386 tmp = neon_load_reg(rn, pass);
7387 }
7388 if (prewiden) {
7389 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
7390 }
7391 }
ad69471c
PB
7392 if (src2_wide) {
7393 neon_load_reg64(cpu_V1, rm + pass);
f764718d 7394 tmp2 = NULL;
9ee6e8bb 7395 } else {
ad69471c 7396 if (pass == 1 && rd == rm) {
dd8fbd78 7397 tmp2 = neon_load_scratch(2);
9ee6e8bb 7398 } else {
ad69471c
PB
7399 tmp2 = neon_load_reg(rm, pass);
7400 }
7401 if (prewiden) {
7402 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 7403 }
9ee6e8bb
PB
7404 }
7405 switch (op) {
7406 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 7407 gen_neon_addl(size);
9ee6e8bb 7408 break;
79b0e534 7409 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 7410 gen_neon_subl(size);
9ee6e8bb
PB
7411 break;
7412 case 5: case 7: /* VABAL, VABDL */
7413 switch ((size << 1) | u) {
ad69471c
PB
7414 case 0:
7415 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7416 break;
7417 case 1:
7418 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7419 break;
7420 case 2:
7421 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7422 break;
7423 case 3:
7424 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7425 break;
7426 case 4:
7427 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7428 break;
7429 case 5:
7430 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7431 break;
9ee6e8bb
PB
7432 default: abort();
7433 }
7d1b0095
PM
7434 tcg_temp_free_i32(tmp2);
7435 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7436 break;
7437 case 8: case 9: case 10: case 11: case 12: case 13:
7438 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 7439 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
7440 break;
7441 case 14: /* Polynomial VMULL */
e5ca24cb 7442 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
7443 tcg_temp_free_i32(tmp2);
7444 tcg_temp_free_i32(tmp);
e5ca24cb 7445 break;
695272dc
PM
7446 default: /* 15 is RESERVED: caught earlier */
7447 abort();
9ee6e8bb 7448 }
ebcd88ce
PM
7449 if (op == 13) {
7450 /* VQDMULL */
7451 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7452 neon_store_reg64(cpu_V0, rd + pass);
7453 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 7454 /* Accumulate. */
ebcd88ce 7455 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7456 switch (op) {
4dc064e6
PM
7457 case 10: /* VMLSL */
7458 gen_neon_negl(cpu_V0, size);
7459 /* Fall through */
7460 case 5: case 8: /* VABAL, VMLAL */
ad69471c 7461 gen_neon_addl(size);
9ee6e8bb
PB
7462 break;
7463 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 7464 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7465 if (op == 11) {
7466 gen_neon_negl(cpu_V0, size);
7467 }
ad69471c
PB
7468 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7469 break;
9ee6e8bb
PB
7470 default:
7471 abort();
7472 }
ad69471c 7473 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7474 } else if (op == 4 || op == 6) {
7475 /* Narrowing operation. */
7d1b0095 7476 tmp = tcg_temp_new_i32();
79b0e534 7477 if (!u) {
9ee6e8bb 7478 switch (size) {
ad69471c
PB
7479 case 0:
7480 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7481 break;
7482 case 1:
7483 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7484 break;
7485 case 2:
7486 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7487 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7488 break;
9ee6e8bb
PB
7489 default: abort();
7490 }
7491 } else {
7492 switch (size) {
ad69471c
PB
7493 case 0:
7494 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7495 break;
7496 case 1:
7497 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7498 break;
7499 case 2:
7500 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7501 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7502 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7503 break;
9ee6e8bb
PB
7504 default: abort();
7505 }
7506 }
ad69471c
PB
7507 if (pass == 0) {
7508 tmp3 = tmp;
7509 } else {
7510 neon_store_reg(rd, 0, tmp3);
7511 neon_store_reg(rd, 1, tmp);
7512 }
9ee6e8bb
PB
7513 } else {
7514 /* Write back the result. */
ad69471c 7515 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7516 }
7517 }
7518 } else {
3e3326df
PM
7519 /* Two registers and a scalar. NB that for ops of this form
7520 * the ARM ARM labels bit 24 as Q, but it is in our variable
7521 * 'u', not 'q'.
7522 */
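/* Editor's example: VMLA.F32 q0, q1, d4[0] is the quad form, so the
 * bit the ARM ARM calls Q shows up here in 'u', and the per-element
 * loops below run 4 passes instead of 2.
 */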
7523 if (size == 0) {
7524 return 1;
7525 }
9ee6e8bb 7526 switch (op) {
9ee6e8bb 7527 case 1: /* Float VMLA scalar */
9ee6e8bb 7528 case 5: /* Floating point VMLS scalar */
9ee6e8bb 7529 case 9: /* Floating point VMUL scalar */
3e3326df
PM
7530 if (size == 1) {
7531 return 1;
7532 }
7533 /* fall through */
7534 case 0: /* Integer VMLA scalar */
7535 case 4: /* Integer VMLS scalar */
7536 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
7537 case 12: /* VQDMULH scalar */
7538 case 13: /* VQRDMULH scalar */
3e3326df
PM
7539 if (u && ((rd | rn) & 1)) {
7540 return 1;
7541 }
dd8fbd78
FN
7542 tmp = neon_get_scalar(size, rm);
7543 neon_store_scratch(0, tmp);
9ee6e8bb 7544 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
7545 tmp = neon_load_scratch(0);
7546 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
7547 if (op == 12) {
7548 if (size == 1) {
02da0b2d 7549 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7550 } else {
02da0b2d 7551 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7552 }
7553 } else if (op == 13) {
7554 if (size == 1) {
02da0b2d 7555 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7556 } else {
02da0b2d 7557 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7558 }
7559 } else if (op & 1) {
aa47cfdd
PM
7560 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7561 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7562 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
7563 } else {
7564 switch (size) {
dd8fbd78
FN
7565 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7566 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7567 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 7568 default: abort();
9ee6e8bb
PB
7569 }
7570 }
7d1b0095 7571 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7572 if (op < 8) {
7573 /* Accumulate. */
dd8fbd78 7574 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
7575 switch (op) {
7576 case 0:
dd8fbd78 7577 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7578 break;
7579 case 1:
aa47cfdd
PM
7580 {
7581 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7582 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7583 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7584 break;
aa47cfdd 7585 }
9ee6e8bb 7586 case 4:
dd8fbd78 7587 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7588 break;
7589 case 5:
aa47cfdd
PM
7590 {
7591 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7592 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7593 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7594 break;
aa47cfdd 7595 }
9ee6e8bb
PB
7596 default:
7597 abort();
7598 }
7d1b0095 7599 tcg_temp_free_i32(tmp2);
9ee6e8bb 7600 }
dd8fbd78 7601 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7602 }
7603 break;
9ee6e8bb 7604 case 3: /* VQDMLAL scalar */
9ee6e8bb 7605 case 7: /* VQDMLSL scalar */
9ee6e8bb 7606 case 11: /* VQDMULL scalar */
3e3326df 7607 if (u == 1) {
ad69471c 7608 return 1;
3e3326df
PM
7609 }
7610 /* fall through */
7611 case 2: /* VMLAL scalar */
7612 case 6: /* VMLSL scalar */
7613 case 10: /* VMULL scalar */
7614 if (rd & 1) {
7615 return 1;
7616 }
dd8fbd78 7617 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7618 /* We need a copy of tmp2 because gen_neon_mull
7619 * deletes it during pass 0. */
7d1b0095 7620 tmp4 = tcg_temp_new_i32();
c6067f04 7621 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7622 tmp3 = neon_load_reg(rn, 1);
ad69471c 7623
9ee6e8bb 7624 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7625 if (pass == 0) {
7626 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7627 } else {
dd8fbd78 7628 tmp = tmp3;
c6067f04 7629 tmp2 = tmp4;
9ee6e8bb 7630 }
ad69471c 7631 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7632 if (op != 11) {
7633 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7634 }
9ee6e8bb 7635 switch (op) {
4dc064e6
PM
7636 case 6:
7637 gen_neon_negl(cpu_V0, size);
7638 /* Fall through */
7639 case 2:
ad69471c 7640 gen_neon_addl(size);
9ee6e8bb
PB
7641 break;
7642 case 3: case 7:
ad69471c 7643 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7644 if (op == 7) {
7645 gen_neon_negl(cpu_V0, size);
7646 }
ad69471c 7647 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7648 break;
7649 case 10:
7650 /* no-op */
7651 break;
7652 case 11:
ad69471c 7653 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7654 break;
7655 default:
7656 abort();
7657 }
ad69471c 7658 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7659 }
61adacc8
RH
7660 break;
7661 case 14: /* VQRDMLAH scalar */
7662 case 15: /* VQRDMLSH scalar */
7663 {
7664 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7665
962fcbf2 7666 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7667 return 1;
7668 }
7669 if (u && ((rd | rn) & 1)) {
7670 return 1;
7671 }
7672 if (op == 14) {
7673 if (size == 1) {
7674 fn = gen_helper_neon_qrdmlah_s16;
7675 } else {
7676 fn = gen_helper_neon_qrdmlah_s32;
7677 }
7678 } else {
7679 if (size == 1) {
7680 fn = gen_helper_neon_qrdmlsh_s16;
7681 } else {
7682 fn = gen_helper_neon_qrdmlsh_s32;
7683 }
7684 }
dd8fbd78 7685
61adacc8
RH
7686 tmp2 = neon_get_scalar(size, rm);
7687 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7688 tmp = neon_load_reg(rn, pass);
7689 tmp3 = neon_load_reg(rd, pass);
7690 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7691 tcg_temp_free_i32(tmp3);
7692 neon_store_reg(rd, pass, tmp);
7693 }
7694 tcg_temp_free_i32(tmp2);
7695 }
9ee6e8bb 7696 break;
61adacc8
RH
7697 default:
7698 g_assert_not_reached();
9ee6e8bb
PB
7699 }
7700 }
7701 } else { /* size == 3 */
7702 if (!u) {
7703 /* Extract. */
9ee6e8bb 7704 imm = (insn >> 8) & 0xf;
ad69471c
PB
7705
7706 if (imm > 7 && !q)
7707 return 1;
7708
52579ea1
PM
7709 if (q && ((rd | rn | rm) & 1)) {
7710 return 1;
7711 }
7712
ad69471c
PB
7713 if (imm == 0) {
7714 neon_load_reg64(cpu_V0, rn);
7715 if (q) {
7716 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7717 }
ad69471c
PB
7718 } else if (imm == 8) {
7719 neon_load_reg64(cpu_V0, rn + 1);
7720 if (q) {
7721 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7722 }
ad69471c 7723 } else if (q) {
a7812ae4 7724 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7725 if (imm < 8) {
7726 neon_load_reg64(cpu_V0, rn);
a7812ae4 7727 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7728 } else {
7729 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7730 neon_load_reg64(tmp64, rm);
ad69471c
PB
7731 }
7732 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7733 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7734 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7735 if (imm < 8) {
7736 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7737 } else {
ad69471c
PB
7738 neon_load_reg64(cpu_V1, rm + 1);
7739 imm -= 8;
9ee6e8bb 7740 }
ad69471c 7741 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7742 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7743 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7744 tcg_temp_free_i64(tmp64);
ad69471c 7745 } else {
a7812ae4 7746 /* BUGFIX */
ad69471c 7747 neon_load_reg64(cpu_V0, rn);
a7812ae4 7748 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7749 neon_load_reg64(cpu_V1, rm);
a7812ae4 7750 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7751 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7752 }
7753 neon_store_reg64(cpu_V0, rd);
7754 if (q) {
7755 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7756 }
7757 } else if ((insn & (1 << 11)) == 0) {
7758 /* Two register misc. */
7759 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7760 size = (insn >> 18) & 3;
600b828c
PM
7761 /* UNDEF for unknown op values and bad op-size combinations */
7762 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7763 return 1;
7764 }
fe8fcf3d
PM
7765 if (neon_2rm_is_v8_op(op) &&
7766 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7767 return 1;
7768 }
fc2a9b37
PM
7769 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7770 q && ((rm | rd) & 1)) {
7771 return 1;
7772 }
9ee6e8bb 7773 switch (op) {
600b828c 7774 case NEON_2RM_VREV64:
9ee6e8bb 7775 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7776 tmp = neon_load_reg(rm, pass * 2);
7777 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7778 switch (size) {
dd8fbd78
FN
7779 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7780 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7781 case 2: /* no-op */ break;
7782 default: abort();
7783 }
dd8fbd78 7784 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7785 if (size == 2) {
dd8fbd78 7786 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7787 } else {
9ee6e8bb 7788 switch (size) {
dd8fbd78
FN
7789 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7790 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7791 default: abort();
7792 }
dd8fbd78 7793 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7794 }
7795 }
7796 break;
600b828c
PM
7797 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7798 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7799 for (pass = 0; pass < q + 1; pass++) {
7800 tmp = neon_load_reg(rm, pass * 2);
7801 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7802 tmp = neon_load_reg(rm, pass * 2 + 1);
7803 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7804 switch (size) {
7805 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7806 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7807 case 2: tcg_gen_add_i64(CPU_V001); break;
7808 default: abort();
7809 }
600b828c 7810 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7811 /* Accumulate. */
ad69471c
PB
7812 neon_load_reg64(cpu_V1, rd + pass);
7813 gen_neon_addl(size);
9ee6e8bb 7814 }
ad69471c 7815 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7816 }
7817 break;
600b828c 7818 case NEON_2RM_VTRN:
9ee6e8bb 7819 if (size == 2) {
a5a14945 7820 int n;
9ee6e8bb 7821 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7822 tmp = neon_load_reg(rm, n);
7823 tmp2 = neon_load_reg(rd, n + 1);
7824 neon_store_reg(rm, n, tmp2);
7825 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7826 }
7827 } else {
7828 goto elementwise;
7829 }
7830 break;
600b828c 7831 case NEON_2RM_VUZP:
02acedf9 7832 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7833 return 1;
9ee6e8bb
PB
7834 }
7835 break;
600b828c 7836 case NEON_2RM_VZIP:
d68a6f3a 7837 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7838 return 1;
9ee6e8bb
PB
7839 }
7840 break;
600b828c
PM
7841 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7842 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7843 if (rm & 1) {
7844 return 1;
7845 }
f764718d 7846 tmp2 = NULL;
9ee6e8bb 7847 for (pass = 0; pass < 2; pass++) {
ad69471c 7848 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7849 tmp = tcg_temp_new_i32();
600b828c
PM
7850 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7851 tmp, cpu_V0);
ad69471c
PB
7852 if (pass == 0) {
7853 tmp2 = tmp;
7854 } else {
7855 neon_store_reg(rd, 0, tmp2);
7856 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7857 }
9ee6e8bb
PB
7858 }
7859 break;
600b828c 7860 case NEON_2RM_VSHLL:
fc2a9b37 7861 if (q || (rd & 1)) {
9ee6e8bb 7862 return 1;
600b828c 7863 }
ad69471c
PB
7864 tmp = neon_load_reg(rm, 0);
7865 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7866 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7867 if (pass == 1)
7868 tmp = tmp2;
7869 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7870 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7871 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7872 }
7873 break;
600b828c 7874 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7875 {
7876 TCGv_ptr fpst;
7877 TCGv_i32 ahp;
7878
d614a513 7879 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7880 q || (rm & 1)) {
7881 return 1;
7882 }
7d1b0095
PM
7883 tmp = tcg_temp_new_i32();
7884 tmp2 = tcg_temp_new_i32();
486624fc
AB
7885 fpst = get_fpstatus_ptr(true);
7886 ahp = get_ahp_flag();
60011498 7887 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7888 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7889 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7890 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7891 tcg_gen_shli_i32(tmp2, tmp2, 16);
7892 tcg_gen_or_i32(tmp2, tmp2, tmp);
7893 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7894 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7895 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7896 neon_store_reg(rd, 0, tmp2);
7d1b0095 7897 tmp2 = tcg_temp_new_i32();
486624fc 7898 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7899 tcg_gen_shli_i32(tmp2, tmp2, 16);
7900 tcg_gen_or_i32(tmp2, tmp2, tmp);
7901 neon_store_reg(rd, 1, tmp2);
7d1b0095 7902 tcg_temp_free_i32(tmp);
486624fc
AB
7903 tcg_temp_free_i32(ahp);
7904 tcg_temp_free_ptr(fpst);
60011498 7905 break;
486624fc 7906 }
600b828c 7907 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7908 {
7909 TCGv_ptr fpst;
7910 TCGv_i32 ahp;
d614a513 7911 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7912 q || (rd & 1)) {
7913 return 1;
7914 }
486624fc
AB
7915 fpst = get_fpstatus_ptr(true);
7916 ahp = get_ahp_flag();
7d1b0095 7917 tmp3 = tcg_temp_new_i32();
60011498
PB
7918 tmp = neon_load_reg(rm, 0);
7919 tmp2 = neon_load_reg(rm, 1);
7920 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7921 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7922 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7923 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7924 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7925 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7926 tcg_temp_free_i32(tmp);
60011498 7927 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7928 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7929 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7930 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7931 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7932 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7933 tcg_temp_free_i32(tmp2);
7934 tcg_temp_free_i32(tmp3);
486624fc
AB
7935 tcg_temp_free_i32(ahp);
7936 tcg_temp_free_ptr(fpst);
60011498 7937 break;
486624fc 7938 }
9d935509 7939 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7940 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7941 return 1;
7942 }
1a66ac61
RH
7943 ptr1 = vfp_reg_ptr(true, rd);
7944 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7945
7946 /* Bit 6 is the lowest opcode bit; it distinguishes between
7947 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7948 */
7949 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7950
7951 if (op == NEON_2RM_AESE) {
1a66ac61 7952 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7953 } else {
1a66ac61 7954 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7955 }
1a66ac61
RH
7956 tcg_temp_free_ptr(ptr1);
7957 tcg_temp_free_ptr(ptr2);
9d935509
AB
7958 tcg_temp_free_i32(tmp3);
7959 break;
f1ecb913 7960 case NEON_2RM_SHA1H:
962fcbf2 7961 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7962 return 1;
7963 }
1a66ac61
RH
7964 ptr1 = vfp_reg_ptr(true, rd);
7965 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7966
1a66ac61 7967 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7968
1a66ac61
RH
7969 tcg_temp_free_ptr(ptr1);
7970 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7971 break;
7972 case NEON_2RM_SHA1SU1:
7973 if ((rm | rd) & 1) {
7974 return 1;
7975 }
7976 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7977 if (q) {
962fcbf2 7978 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
7979 return 1;
7980 }
962fcbf2 7981 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
7982 return 1;
7983 }
1a66ac61
RH
7984 ptr1 = vfp_reg_ptr(true, rd);
7985 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7986 if (q) {
1a66ac61 7987 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7988 } else {
1a66ac61 7989 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7990 }
1a66ac61
RH
7991 tcg_temp_free_ptr(ptr1);
7992 tcg_temp_free_ptr(ptr2);
f1ecb913 7993 break;
4bf940be
RH
7994
7995 case NEON_2RM_VMVN:
7996 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
7997 break;
7998 case NEON_2RM_VNEG:
7999 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
8000 break;
8001
9ee6e8bb
PB
8002 default:
8003 elementwise:
8004 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 8005 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8006 tcg_gen_ld_f32(cpu_F0s, cpu_env,
8007 neon_reg_offset(rm, pass));
f764718d 8008 tmp = NULL;
9ee6e8bb 8009 } else {
dd8fbd78 8010 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
8011 }
8012 switch (op) {
600b828c 8013 case NEON_2RM_VREV32:
9ee6e8bb 8014 switch (size) {
dd8fbd78
FN
8015 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
8016 case 1: gen_swap_half(tmp); break;
600b828c 8017 default: abort();
9ee6e8bb
PB
8018 }
8019 break;
600b828c 8020 case NEON_2RM_VREV16:
dd8fbd78 8021 gen_rev16(tmp);
9ee6e8bb 8022 break;
600b828c 8023 case NEON_2RM_VCLS:
9ee6e8bb 8024 switch (size) {
dd8fbd78
FN
8025 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
8026 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
8027 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 8028 default: abort();
9ee6e8bb
PB
8029 }
8030 break;
600b828c 8031 case NEON_2RM_VCLZ:
9ee6e8bb 8032 switch (size) {
dd8fbd78
FN
8033 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
8034 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 8035 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 8036 default: abort();
9ee6e8bb
PB
8037 }
8038 break;
600b828c 8039 case NEON_2RM_VCNT:
dd8fbd78 8040 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 8041 break;
600b828c 8042 case NEON_2RM_VQABS:
9ee6e8bb 8043 switch (size) {
02da0b2d
PM
8044 case 0:
8045 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
8046 break;
8047 case 1:
8048 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
8049 break;
8050 case 2:
8051 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
8052 break;
600b828c 8053 default: abort();
9ee6e8bb
PB
8054 }
8055 break;
600b828c 8056 case NEON_2RM_VQNEG:
9ee6e8bb 8057 switch (size) {
02da0b2d
PM
8058 case 0:
8059 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
8060 break;
8061 case 1:
8062 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
8063 break;
8064 case 2:
8065 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
8066 break;
600b828c 8067 default: abort();
9ee6e8bb
PB
8068 }
8069 break;
600b828c 8070 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 8071 tmp2 = tcg_const_i32(0);
9ee6e8bb 8072 switch(size) {
dd8fbd78
FN
8073 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
8074 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
8075 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 8076 default: abort();
9ee6e8bb 8077 }
39d5492a 8078 tcg_temp_free_i32(tmp2);
600b828c 8079 if (op == NEON_2RM_VCLE0) {
dd8fbd78 8080 tcg_gen_not_i32(tmp, tmp);
600b828c 8081 }
9ee6e8bb 8082 break;
600b828c 8083 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 8084 tmp2 = tcg_const_i32(0);
9ee6e8bb 8085 switch(size) {
dd8fbd78
FN
8086 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
8087 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
8088 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 8089 default: abort();
9ee6e8bb 8090 }
39d5492a 8091 tcg_temp_free_i32(tmp2);
600b828c 8092 if (op == NEON_2RM_VCLT0) {
dd8fbd78 8093 tcg_gen_not_i32(tmp, tmp);
600b828c 8094 }
9ee6e8bb 8095 break;
600b828c 8096 case NEON_2RM_VCEQ0:
dd8fbd78 8097 tmp2 = tcg_const_i32(0);
9ee6e8bb 8098 switch(size) {
dd8fbd78
FN
8099 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
8100 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
8101 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 8102 default: abort();
9ee6e8bb 8103 }
39d5492a 8104 tcg_temp_free_i32(tmp2);
9ee6e8bb 8105 break;
600b828c 8106 case NEON_2RM_VABS:
9ee6e8bb 8107 switch(size) {
dd8fbd78
FN
8108 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
8109 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
8110 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 8111 default: abort();
9ee6e8bb
PB
8112 }
8113 break;
600b828c 8114 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
8115 {
8116 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8117 tmp2 = tcg_const_i32(0);
aa47cfdd 8118 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8119 tcg_temp_free_i32(tmp2);
aa47cfdd 8120 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8121 break;
aa47cfdd 8122 }
600b828c 8123 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
8124 {
8125 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8126 tmp2 = tcg_const_i32(0);
aa47cfdd 8127 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8128 tcg_temp_free_i32(tmp2);
aa47cfdd 8129 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8130 break;
aa47cfdd 8131 }
600b828c 8132 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
8133 {
8134 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 8135 tmp2 = tcg_const_i32(0);
aa47cfdd 8136 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8137 tcg_temp_free_i32(tmp2);
aa47cfdd 8138 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8139 break;
aa47cfdd 8140 }
600b828c 8141 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
8142 {
8143 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8144 tmp2 = tcg_const_i32(0);
aa47cfdd 8145 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8146 tcg_temp_free_i32(tmp2);
aa47cfdd 8147 tcg_temp_free_ptr(fpstatus);
0e326109 8148 break;
aa47cfdd 8149 }
600b828c 8150 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
8151 {
8152 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8153 tmp2 = tcg_const_i32(0);
aa47cfdd 8154 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8155 tcg_temp_free_i32(tmp2);
aa47cfdd 8156 tcg_temp_free_ptr(fpstatus);
0e326109 8157 break;
aa47cfdd 8158 }
600b828c 8159 case NEON_2RM_VABS_F:
4373f3ce 8160 gen_vfp_abs(0);
9ee6e8bb 8161 break;
600b828c 8162 case NEON_2RM_VNEG_F:
4373f3ce 8163 gen_vfp_neg(0);
9ee6e8bb 8164 break;
600b828c 8165 case NEON_2RM_VSWP:
dd8fbd78
FN
8166 tmp2 = neon_load_reg(rd, pass);
8167 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8168 break;
600b828c 8169 case NEON_2RM_VTRN:
dd8fbd78 8170 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 8171 switch (size) {
dd8fbd78
FN
8172 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8173 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 8174 default: abort();
9ee6e8bb 8175 }
dd8fbd78 8176 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8177 break;
34f7b0a2
WN
8178 case NEON_2RM_VRINTN:
8179 case NEON_2RM_VRINTA:
8180 case NEON_2RM_VRINTM:
8181 case NEON_2RM_VRINTP:
8182 case NEON_2RM_VRINTZ:
8183 {
8184 TCGv_i32 tcg_rmode;
8185 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8186 int rmode;
8187
8188 if (op == NEON_2RM_VRINTZ) {
8189 rmode = FPROUNDING_ZERO;
8190 } else {
8191 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8192 }
8193
8194 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8195 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8196 cpu_env);
8197 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8198 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8199 cpu_env);
8200 tcg_temp_free_ptr(fpstatus);
8201 tcg_temp_free_i32(tcg_rmode);
8202 break;
8203 }
2ce70625
WN
8204 case NEON_2RM_VRINTX:
8205 {
8206 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8207 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8208 tcg_temp_free_ptr(fpstatus);
8209 break;
8210 }
901ad525
WN
8211 case NEON_2RM_VCVTAU:
8212 case NEON_2RM_VCVTAS:
8213 case NEON_2RM_VCVTNU:
8214 case NEON_2RM_VCVTNS:
8215 case NEON_2RM_VCVTPU:
8216 case NEON_2RM_VCVTPS:
8217 case NEON_2RM_VCVTMU:
8218 case NEON_2RM_VCVTMS:
8219 {
8220 bool is_signed = !extract32(insn, 7, 1);
8221 TCGv_ptr fpst = get_fpstatus_ptr(1);
8222 TCGv_i32 tcg_rmode, tcg_shift;
8223 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8224
8225 tcg_shift = tcg_const_i32(0);
8226 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8227 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8228 cpu_env);
8229
8230 if (is_signed) {
8231 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8232 tcg_shift, fpst);
8233 } else {
8234 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8235 tcg_shift, fpst);
8236 }
8237
8238 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8239 cpu_env);
8240 tcg_temp_free_i32(tcg_rmode);
8241 tcg_temp_free_i32(tcg_shift);
8242 tcg_temp_free_ptr(fpst);
8243 break;
8244 }
600b828c 8245 case NEON_2RM_VRECPE:
b6d4443a
AB
8246 {
8247 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8248 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8249 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8250 break;
b6d4443a 8251 }
600b828c 8252 case NEON_2RM_VRSQRTE:
c2fb418e
AB
8253 {
8254 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8255 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8256 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8257 break;
c2fb418e 8258 }
600b828c 8259 case NEON_2RM_VRECPE_F:
b6d4443a
AB
8260 {
8261 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8262 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8263 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8264 break;
b6d4443a 8265 }
600b828c 8266 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
8267 {
8268 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8269 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8270 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8271 break;
c2fb418e 8272 }
600b828c 8273 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 8274 gen_vfp_sito(0, 1);
9ee6e8bb 8275 break;
600b828c 8276 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 8277 gen_vfp_uito(0, 1);
9ee6e8bb 8278 break;
600b828c 8279 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 8280 gen_vfp_tosiz(0, 1);
9ee6e8bb 8281 break;
600b828c 8282 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 8283 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
8284 break;
8285 default:
600b828c
PM
8286 /* Reserved op values were caught by the
8287 * neon_2rm_sizes[] check earlier.
8288 */
8289 abort();
9ee6e8bb 8290 }
600b828c 8291 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8292 tcg_gen_st_f32(cpu_F0s, cpu_env,
8293 neon_reg_offset(rd, pass));
9ee6e8bb 8294 } else {
dd8fbd78 8295 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
8296 }
8297 }
8298 break;
8299 }
8300 } else if ((insn & (1 << 10)) == 0) {
8301 /* VTBL, VTBX. */
56907d77
PM
8302 int n = ((insn >> 8) & 3) + 1;
8303 if ((rn + n) > 32) {
8304 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8305 * helper function running off the end of the register file.
8306 */
8307 return 1;
8308 }
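/* Editor's worked example (not in the original source): a VTBL with
 * a three-register list starting at d30 would need d30, d31 and a
 * nonexistent d32 -- rn + n = 30 + 3 > 32 -- hence the UNDEF above.
 */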
8309 n <<= 3;
9ee6e8bb 8310 if (insn & (1 << 6)) {
8f8e3aa4 8311 tmp = neon_load_reg(rd, 0);
9ee6e8bb 8312 } else {
7d1b0095 8313 tmp = tcg_temp_new_i32();
8f8e3aa4 8314 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8315 }
8f8e3aa4 8316 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 8317 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 8318 tmp5 = tcg_const_i32(n);
e7c06c4e 8319 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 8320 tcg_temp_free_i32(tmp);
9ee6e8bb 8321 if (insn & (1 << 6)) {
8f8e3aa4 8322 tmp = neon_load_reg(rd, 1);
9ee6e8bb 8323 } else {
7d1b0095 8324 tmp = tcg_temp_new_i32();
8f8e3aa4 8325 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8326 }
8f8e3aa4 8327 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 8328 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 8329 tcg_temp_free_i32(tmp5);
e7c06c4e 8330 tcg_temp_free_ptr(ptr1);
8f8e3aa4 8331 neon_store_reg(rd, 0, tmp2);
3018f259 8332 neon_store_reg(rd, 1, tmp3);
7d1b0095 8333 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8334 } else if ((insn & 0x380) == 0) {
8335 /* VDUP */
32f91fb7
RH
8336 int element;
8337 TCGMemOp size;
8338
133da6aa
JR
8339 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8340 return 1;
8341 }
9ee6e8bb 8342 if (insn & (1 << 16)) {
32f91fb7
RH
8343 size = MO_8;
8344 element = (insn >> 17) & 7;
9ee6e8bb 8345 } else if (insn & (1 << 17)) {
32f91fb7
RH
8346 size = MO_16;
8347 element = (insn >> 18) & 3;
8348 } else {
8349 size = MO_32;
8350 element = (insn >> 19) & 1;
9ee6e8bb 8351 }
32f91fb7
RH
8352 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8353 neon_element_offset(rm, element, size),
8354 q ? 16 : 8, q ? 16 : 8);
9ee6e8bb
PB
8355 } else {
8356 return 1;
8357 }
8358 }
8359 }
8360 return 0;
8361}
8362
8b7209fa
RH
8363/* Advanced SIMD three registers of the same length extension.
8364 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8365 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8366 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8367 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8368 */
8369static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8370{
26c470a7
RH
8371 gen_helper_gvec_3 *fn_gvec = NULL;
8372 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8373 int rd, rn, rm, opr_sz;
8374 int data = 0;
8b7209fa
RH
8375 bool q;
8376
8377 q = extract32(insn, 6, 1);
8378 VFP_DREG_D(rd, insn);
8379 VFP_DREG_N(rn, insn);
8380 VFP_DREG_M(rm, insn);
8381 if ((rd | rn | rm) & q) {
8382 return 1;
8383 }
8384
8385 if ((insn & 0xfe200f10) == 0xfc200800) {
8386 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
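/* Editor's note on the decode test above:
 * (insn & 0xfe200f10) == 0xfc200800 pins the fixed fields of the
 * encoding diagram -- 1111110 in bits [31:25], the 1 in op2's
 * bit 21, the 1000 opcode in bits [11:8] and o4 = 0 -- while
 * leaving the rot, size and register fields free to vary.
 */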
26c470a7
RH
8387 int size = extract32(insn, 20, 1);
8388 data = extract32(insn, 23, 2); /* rot */
962fcbf2 8389 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8390 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8391 return 1;
8392 }
8393 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8394 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8395 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
8396 int size = extract32(insn, 20, 1);
8397 data = extract32(insn, 24, 1); /* rot */
962fcbf2 8398 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8399 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8400 return 1;
8401 }
8402 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
8403 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8404 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8405 bool u = extract32(insn, 4, 1);
962fcbf2 8406 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8407 return 1;
8408 }
8409 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
8410 } else {
8411 return 1;
8412 }
8413
8414 if (s->fp_excp_el) {
8415 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8416 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
8417 return 0;
8418 }
8419 if (!s->vfp_enabled) {
8420 return 1;
8421 }
8422
8423 opr_sz = (1 + q) * 8;
26c470a7
RH
8424 if (fn_gvec_ptr) {
8425 TCGv_ptr fpst = get_fpstatus_ptr(1);
8426 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8427 vfp_reg_offset(1, rn),
8428 vfp_reg_offset(1, rm), fpst,
8429 opr_sz, opr_sz, data, fn_gvec_ptr);
8430 tcg_temp_free_ptr(fpst);
8431 } else {
8432 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8433 vfp_reg_offset(1, rn),
8434 vfp_reg_offset(1, rm),
8435 opr_sz, opr_sz, data, fn_gvec);
8436 }
8b7209fa
RH
8437 return 0;
8438}
8439
638808ff
RH
8440/* Advanced SIMD two registers and a scalar extension.
8441 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8442 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8443 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8444 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8445 *
8446 */
8447
8448static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8449{
26c470a7
RH
8450 gen_helper_gvec_3 *fn_gvec = NULL;
8451 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 8452 int rd, rn, rm, opr_sz, data;
638808ff
RH
8453 bool q;
8454
8455 q = extract32(insn, 6, 1);
8456 VFP_DREG_D(rd, insn);
8457 VFP_DREG_N(rn, insn);
638808ff
RH
8458 if ((rd | rn) & q) {
8459 return 1;
8460 }
8461
8462 if ((insn & 0xff000f10) == 0xfe000800) {
8463 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
8464 int rot = extract32(insn, 20, 2);
8465 int size = extract32(insn, 23, 1);
8466 int index;
8467
962fcbf2 8468 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
8469 return 1;
8470 }
2cc99919 8471 if (size == 0) {
5763190f 8472 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
8473 return 1;
8474 }
8475 /* For fp16, rm is just Vm, and index is M. */
8476 rm = extract32(insn, 0, 4);
8477 index = extract32(insn, 5, 1);
8478 } else {
8479 /* For fp32, rm is the usual M:Vm, and index is 0. */
8480 VFP_DREG_M(rm, insn);
8481 index = 0;
8482 }
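/* Editor's note: a 64-bit Vm holds four fp16 elements but only two
 * complex (real, imag) pairs, so a single index bit M is enough to
 * pick the pair; that is why the fp16 form spends M on the index
 * and encodes rm in just 4 bits.
 */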
8483 data = (index << 2) | rot;
8484 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8485 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
8486 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8487 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8488 int u = extract32(insn, 4, 1);
962fcbf2 8489 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8490 return 1;
8491 }
8492 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8493 /* rm is just Vm, and index is M. */
8494 data = extract32(insn, 5, 1); /* index */
8495 rm = extract32(insn, 0, 4);
638808ff
RH
8496 } else {
8497 return 1;
8498 }
8499
8500 if (s->fp_excp_el) {
8501 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8502 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8503 return 0;
8504 }
8505 if (!s->vfp_enabled) {
8506 return 1;
8507 }
8508
8509 opr_sz = (1 + q) * 8;
26c470a7
RH
8510 if (fn_gvec_ptr) {
8511 TCGv_ptr fpst = get_fpstatus_ptr(1);
8512 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8513 vfp_reg_offset(1, rn),
8514 vfp_reg_offset(1, rm), fpst,
8515 opr_sz, opr_sz, data, fn_gvec_ptr);
8516 tcg_temp_free_ptr(fpst);
8517 } else {
8518 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8519 vfp_reg_offset(1, rn),
8520 vfp_reg_offset(1, rm),
8521 opr_sz, opr_sz, data, fn_gvec);
8522 }
638808ff
RH
8523 return 0;
8524}
8525
7dcc1f89 8526static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8527{
4b6a83fb
PM
8528 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8529 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8530
8531 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8532
8533 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8534 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8535 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8536 return 1;
8537 }
d614a513 8538 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8539 return disas_iwmmxt_insn(s, insn);
d614a513 8540 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8541 return disas_dsp_insn(s, insn);
c0f4af17
PM
8542 }
8543 return 1;
4b6a83fb
PM
8544 }
8545
8546 /* Otherwise treat as a generic register access */
8547 is64 = (insn & (1 << 25)) == 0;
8548 if (!is64 && ((insn & (1 << 4)) == 0)) {
8549 /* cdp */
8550 return 1;
8551 }
8552
8553 crm = insn & 0xf;
8554 if (is64) {
8555 crn = 0;
8556 opc1 = (insn >> 4) & 0xf;
8557 opc2 = 0;
8558 rt2 = (insn >> 16) & 0xf;
8559 } else {
8560 crn = (insn >> 16) & 0xf;
8561 opc1 = (insn >> 21) & 7;
8562 opc2 = (insn >> 5) & 7;
8563 rt2 = 0;
8564 }
8565 isread = (insn >> 20) & 1;
8566 rt = (insn >> 12) & 0xf;
8567
60322b39 8568 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8569 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8570 if (ri) {
8571 /* Check access permissions */
dcbff19b 8572 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8573 return 1;
8574 }
8575
c0f4af17 8576 if (ri->accessfn ||
d614a513 8577 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8578 /* Emit code to perform further access permissions checks at
8579 * runtime; this may result in an exception.
c0f4af17
PM
8580 * Note that on XScale all cp0..cp13 registers do an access check
8581 * call in order to handle c15_cpar.
f59df3f2
PM
8582 */
8583 TCGv_ptr tmpptr;
3f208fd7 8584 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8585 uint32_t syndrome;
8586
8587 /* Note that since we are an implementation which takes an
8588 * exception on a trapped conditional instruction only if the
8589 * instruction passes its condition code check, we can take
8590 * advantage of the clause in the ARM ARM that allows us to set
8591 * the COND field in the instruction to 0xE in all cases.
8592 * We could fish the actual condition out of the insn (ARM)
8593 * or the condexec bits (Thumb) but it isn't necessary.
8594 */
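/* Editor's example (hypothetical trap): a trapped
 * MRC p15, 0, r0, c1, c0, 0 (an SCTLR read) would be reported via
 * syn_cp15_rt_trap() with its opc1/opc2/crn/crm/rt fields and
 * cond = 0xe, irrespective of the condition actually attached to
 * the instruction.
 */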
8595 switch (cpnum) {
8596 case 14:
8597 if (is64) {
8598 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8599 isread, false);
8bcbf37c
PM
8600 } else {
8601 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8602 rt, isread, false);
8bcbf37c
PM
8603 }
8604 break;
8605 case 15:
8606 if (is64) {
8607 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8608 isread, false);
8bcbf37c
PM
8609 } else {
8610 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8611 rt, isread, false);
8bcbf37c
PM
8612 }
8613 break;
8614 default:
8615 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8616 * so this can only happen if this is an ARMv7 or earlier CPU,
8617 * in which case the syndrome information won't actually be
8618 * guest visible.
8619 */
d614a513 8620 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8621 syndrome = syn_uncategorized();
8622 break;
8623 }
8624
43bfa4a1 8625 gen_set_condexec(s);
3977ee5d 8626 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8627 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8628 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8629 tcg_isread = tcg_const_i32(isread);
8630 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8631 tcg_isread);
f59df3f2 8632 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8633 tcg_temp_free_i32(tcg_syn);
3f208fd7 8634 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8635 }
8636
4b6a83fb
PM
8637 /* Handle special cases first */
8638 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8639 case ARM_CP_NOP:
8640 return 0;
8641 case ARM_CP_WFI:
8642 if (isread) {
8643 return 1;
8644 }
eaed129d 8645 gen_set_pc_im(s, s->pc);
dcba3a8d 8646 s->base.is_jmp = DISAS_WFI;
2bee5105 8647 return 0;
4b6a83fb
PM
8648 default:
8649 break;
8650 }
8651
c5a49c63 8652 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8653 gen_io_start();
8654 }
8655
4b6a83fb
PM
8656 if (isread) {
8657 /* Read */
8658 if (is64) {
8659 TCGv_i64 tmp64;
8660 TCGv_i32 tmp;
8661 if (ri->type & ARM_CP_CONST) {
8662 tmp64 = tcg_const_i64(ri->resetvalue);
8663 } else if (ri->readfn) {
8664 TCGv_ptr tmpptr;
4b6a83fb
PM
8665 tmp64 = tcg_temp_new_i64();
8666 tmpptr = tcg_const_ptr(ri);
8667 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8668 tcg_temp_free_ptr(tmpptr);
8669 } else {
8670 tmp64 = tcg_temp_new_i64();
8671 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8672 }
8673 tmp = tcg_temp_new_i32();
ecc7b3aa 8674 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8675 store_reg(s, rt, tmp);
8676 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8677 tmp = tcg_temp_new_i32();
ecc7b3aa 8678 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8679 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8680 store_reg(s, rt2, tmp);
8681 } else {
39d5492a 8682 TCGv_i32 tmp;
4b6a83fb
PM
8683 if (ri->type & ARM_CP_CONST) {
8684 tmp = tcg_const_i32(ri->resetvalue);
8685 } else if (ri->readfn) {
8686 TCGv_ptr tmpptr;
4b6a83fb
PM
8687 tmp = tcg_temp_new_i32();
8688 tmpptr = tcg_const_ptr(ri);
8689 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8690 tcg_temp_free_ptr(tmpptr);
8691 } else {
8692 tmp = load_cpu_offset(ri->fieldoffset);
8693 }
8694 if (rt == 15) {
8695 /* A destination register of r15 for a 32-bit load sets
8696 * the condition codes from the high 4 bits of the value.
8697 */
8698 gen_set_nzcv(tmp);
8699 tcg_temp_free_i32(tmp);
8700 } else {
8701 store_reg(s, rt, tmp);
8702 }
8703 }
8704 } else {
8705 /* Write */
8706 if (ri->type & ARM_CP_CONST) {
8707 /* If not forbidden by access permissions, treat as WI */
8708 return 0;
8709 }
8710
8711 if (is64) {
39d5492a 8712 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8713 TCGv_i64 tmp64 = tcg_temp_new_i64();
8714 tmplo = load_reg(s, rt);
8715 tmphi = load_reg(s, rt2);
8716 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8717 tcg_temp_free_i32(tmplo);
8718 tcg_temp_free_i32(tmphi);
8719 if (ri->writefn) {
8720 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8721 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8722 tcg_temp_free_ptr(tmpptr);
8723 } else {
8724 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8725 }
8726 tcg_temp_free_i64(tmp64);
8727 } else {
8728 if (ri->writefn) {
39d5492a 8729 TCGv_i32 tmp;
4b6a83fb 8730 TCGv_ptr tmpptr;
4b6a83fb
PM
8731 tmp = load_reg(s, rt);
8732 tmpptr = tcg_const_ptr(ri);
8733 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8734 tcg_temp_free_ptr(tmpptr);
8735 tcg_temp_free_i32(tmp);
8736 } else {
39d5492a 8737 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8738 store_cpu_offset(tmp, ri->fieldoffset);
8739 }
8740 }
2452731c
PM
8741 }
8742
c5a49c63 8743 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8744 /* I/O operations must end the TB here (whether read or write) */
8745 gen_io_end();
8746 gen_lookup_tb(s);
8747 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8748 /* We default to ending the TB on a coprocessor register write,
8749 * but allow this to be suppressed by the register definition
8750 * (usually only necessary to work around guest bugs).
8751 */
2452731c 8752 gen_lookup_tb(s);
4b6a83fb 8753 }
2452731c 8754
4b6a83fb
PM
8755 return 0;
8756 }
8757
626187d8
PM
8758 /* Unknown register; this might be a guest error or a QEMU
8759 * unimplemented feature.
8760 */
8761 if (is64) {
8762 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8763 "64 bit system register cp:%d opc1: %d crm:%d "
8764 "(%s)\n",
8765 isread ? "read" : "write", cpnum, opc1, crm,
8766 s->ns ? "non-secure" : "secure");
626187d8
PM
8767 } else {
8768 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8769 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8770 "(%s)\n",
8771 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8772 s->ns ? "non-secure" : "secure");
626187d8
PM
8773 }
8774
4a9a539f 8775 return 1;
9ee6e8bb
PB
8776}
8777
5e3f878a
PB
8778
8779/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8780static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8781{
39d5492a 8782 TCGv_i32 tmp;
7d1b0095 8783 tmp = tcg_temp_new_i32();
ecc7b3aa 8784 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8785 store_reg(s, rlow, tmp);
7d1b0095 8786 tmp = tcg_temp_new_i32();
5e3f878a 8787 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8788 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8789 store_reg(s, rhigh, tmp);
8790}
8791
8792/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8793static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8794{
a7812ae4 8795 TCGv_i64 tmp;
39d5492a 8796 TCGv_i32 tmp2;
5e3f878a 8797
36aa55dc 8798 /* Load value and extend to 64 bits. */
a7812ae4 8799 tmp = tcg_temp_new_i64();
5e3f878a
PB
8800 tmp2 = load_reg(s, rlow);
8801 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8802 tcg_temp_free_i32(tmp2);
5e3f878a 8803 tcg_gen_add_i64(val, val, tmp);
b75263d6 8804 tcg_temp_free_i64(tmp);
5e3f878a
PB
8805}
8806
8807/* load and add a 64-bit value from a register pair. */
a7812ae4 8808static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8809{
a7812ae4 8810 TCGv_i64 tmp;
39d5492a
PM
8811 TCGv_i32 tmpl;
8812 TCGv_i32 tmph;
5e3f878a
PB
8813
8814 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8815 tmpl = load_reg(s, rlow);
8816 tmph = load_reg(s, rhigh);
a7812ae4 8817 tmp = tcg_temp_new_i64();
36aa55dc 8818 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8819 tcg_temp_free_i32(tmpl);
8820 tcg_temp_free_i32(tmph);
5e3f878a 8821 tcg_gen_add_i64(val, val, tmp);
b75263d6 8822 tcg_temp_free_i64(tmp);
5e3f878a
PB
8823}
8824
c9f10124 8825/* Set N and Z flags from hi|lo. */
39d5492a 8826static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8827{
c9f10124
RH
8828 tcg_gen_mov_i32(cpu_NF, hi);
8829 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8830}
8831
426f5abc
PB
8832/* Load/Store exclusive instructions are implemented by remembering
8833 the value/address loaded, and seeing if these are the same
354161b3 8834 when the store is performed. This should be sufficient to implement
426f5abc 8835 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8836 regular stores. The compare vs the remembered value is done during
8837 the cmpxchg operation, but we must compare the addresses manually. */
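/* Editor's sketch of the model (not the exact helper flow):
 *
 *   LDREX: exclusive_addr = addr; exclusive_val = *addr; Rt = *addr;
 *   STREX: if (addr == exclusive_addr &&
 *              cmpxchg(addr, exclusive_val, Rt) == exclusive_val)
 *              Rd = 0;   // store performed
 *          else
 *              Rd = 1;   // exclusivity lost
 *
 * The address check is emitted as explicit TCG compare/branch code;
 * the value check rides inside the atomic cmpxchg itself.
 */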
426f5abc 8838static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8839 TCGv_i32 addr, int size)
426f5abc 8840{
94ee24e7 8841 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8842 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8843
50225ad0
PM
8844 s->is_ldex = true;
8845
426f5abc 8846 if (size == 3) {
39d5492a 8847 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8848 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8849
3448d47b
PM
8850 /* For AArch32, architecturally the 32-bit word at the lowest
8851 * address is always Rt and the one at addr+4 is Rt2, even if
8852 * the CPU is big-endian. That means we don't want to do a
8853 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8854 * for an architecturally 64-bit access, but instead do a
8855 * 64-bit access using MO_BE if appropriate and then split
8856 * the two halves.
8857 * This only makes a difference for BE32 user-mode, where
8858 * frob64() must not flip the two halves of the 64-bit data
8859 * but this code must treat BE32 user-mode like BE32 system.
8860 */
8861 TCGv taddr = gen_aa32_addr(s, addr, opc);
8862
8863 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8864 tcg_temp_free(taddr);
354161b3 8865 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8866 if (s->be_data == MO_BE) {
8867 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8868 } else {
8869 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8870 }
354161b3
EC
8871 tcg_temp_free_i64(t64);
8872
8873 store_reg(s, rt2, tmp2);
03d05e2d 8874 } else {
354161b3 8875 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8876 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8877 }
03d05e2d
PM
8878
8879 store_reg(s, rt, tmp);
8880 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8881}
8882
8883static void gen_clrex(DisasContext *s)
8884{
03d05e2d 8885 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8886}
8887
426f5abc 8888static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8889 TCGv_i32 addr, int size)
426f5abc 8890{
354161b3
EC
8891 TCGv_i32 t0, t1, t2;
8892 TCGv_i64 extaddr;
8893 TCGv taddr;
42a268c2
RH
8894 TCGLabel *done_label;
8895 TCGLabel *fail_label;
354161b3 8896 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8897
8898 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8899 [addr] = {Rt};
8900 {Rd} = 0;
8901 } else {
8902 {Rd} = 1;
8903 } */
8904 fail_label = gen_new_label();
8905 done_label = gen_new_label();
03d05e2d
PM
8906 extaddr = tcg_temp_new_i64();
8907 tcg_gen_extu_i32_i64(extaddr, addr);
8908 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8909 tcg_temp_free_i64(extaddr);
8910
354161b3
EC
8911 taddr = gen_aa32_addr(s, addr, opc);
8912 t0 = tcg_temp_new_i32();
8913 t1 = load_reg(s, rt);
426f5abc 8914 if (size == 3) {
354161b3
EC
8915 TCGv_i64 o64 = tcg_temp_new_i64();
8916 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8917
354161b3 8918 t2 = load_reg(s, rt2);
3448d47b
PM
8919 /* For AArch32, architecturally the 32-bit word at the lowest
8920 * address is always Rt and the one at addr+4 is Rt2, even if
8921 * the CPU is big-endian. Since we're going to treat this as a
8922 * single 64-bit BE store, we need to put the two halves in the
8923 * opposite order for BE to LE, so that they end up in the right
8924 * places.
8925 * We don't want gen_aa32_frob64() because that does the wrong
8926 * thing for BE32 usermode.
8927 */
8928 if (s->be_data == MO_BE) {
8929 tcg_gen_concat_i32_i64(n64, t2, t1);
8930 } else {
8931 tcg_gen_concat_i32_i64(n64, t1, t2);
8932 }
354161b3 8933 tcg_temp_free_i32(t2);
03d05e2d 8934
354161b3
EC
8935 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8936 get_mem_index(s), opc);
8937 tcg_temp_free_i64(n64);
8938
354161b3
EC
8939 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8940 tcg_gen_extrl_i64_i32(t0, o64);
8941
8942 tcg_temp_free_i64(o64);
8943 } else {
8944 t2 = tcg_temp_new_i32();
8945 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8946 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8947 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8948 tcg_temp_free_i32(t2);
426f5abc 8949 }
354161b3
EC
8950 tcg_temp_free_i32(t1);
8951 tcg_temp_free(taddr);
8952 tcg_gen_mov_i32(cpu_R[rd], t0);
8953 tcg_temp_free_i32(t0);
426f5abc 8954 tcg_gen_br(done_label);
354161b3 8955
426f5abc
PB
8956 gen_set_label(fail_label);
8957 tcg_gen_movi_i32(cpu_R[rd], 1);
8958 gen_set_label(done_label);
03d05e2d 8959 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8960}
426f5abc 8961
81465888
PM
8962/* gen_srs:
8963 * @env: CPUARMState
8964 * @s: DisasContext
8965 * @mode: mode field from insn (which stack to store to)
8966 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8967 * @writeback: true if writeback bit set
8968 *
8969 * Generate code for the SRS (Store Return State) insn.
8970 */
8971static void gen_srs(DisasContext *s,
8972 uint32_t mode, uint32_t amode, bool writeback)
8973{
8974 int32_t offset;
cbc0326b
PM
8975 TCGv_i32 addr, tmp;
8976 bool undef = false;
8977
8978 /* SRS is:
8979 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8980 * and specified mode is monitor mode
cbc0326b
PM
8981 * - UNDEFINED in Hyp mode
8982 * - UNPREDICTABLE in User or System mode
8983 * - UNPREDICTABLE if the specified mode is:
8984 * -- not implemented
8985 * -- not a valid mode number
8986 * -- a mode that's at a higher exception level
8987 * -- Monitor, if we are Non-secure
f01377f5 8988 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8989 */
ba63cf47 8990 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8991 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8992 return;
8993 }
8994
8995 if (s->current_el == 0 || s->current_el == 2) {
8996 undef = true;
8997 }
8998
8999 switch (mode) {
9000 case ARM_CPU_MODE_USR:
9001 case ARM_CPU_MODE_FIQ:
9002 case ARM_CPU_MODE_IRQ:
9003 case ARM_CPU_MODE_SVC:
9004 case ARM_CPU_MODE_ABT:
9005 case ARM_CPU_MODE_UND:
9006 case ARM_CPU_MODE_SYS:
9007 break;
9008 case ARM_CPU_MODE_HYP:
9009 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
9010 undef = true;
9011 }
9012 break;
9013 case ARM_CPU_MODE_MON:
9014 /* No need to check specifically for "are we non-secure" because
9015 * we've already made EL0 UNDEF and handled the trap for S-EL1;
9016 * so if this isn't EL3 then we must be non-secure.
9017 */
9018 if (s->current_el != 3) {
9019 undef = true;
9020 }
9021 break;
9022 default:
9023 undef = true;
9024 }
9025
9026 if (undef) {
9027 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9028 default_exception_el(s));
9029 return;
9030 }
9031
9032 addr = tcg_temp_new_i32();
9033 tmp = tcg_const_i32(mode);
f01377f5
PM
9034 /* get_r13_banked() will raise an exception if called from System mode */
9035 gen_set_condexec(s);
9036 gen_set_pc_im(s, s->pc - 4);
81465888
PM
9037 gen_helper_get_r13_banked(addr, cpu_env, tmp);
9038 tcg_temp_free_i32(tmp);
9039 switch (amode) {
9040 case 0: /* DA */
9041 offset = -4;
9042 break;
9043 case 1: /* IA */
9044 offset = 0;
9045 break;
9046 case 2: /* DB */
9047 offset = -8;
9048 break;
9049 case 3: /* IB */
9050 offset = 4;
9051 break;
9052 default:
9053 abort();
9054 }
9055 tcg_gen_addi_i32(addr, addr, offset);
9056 tmp = load_reg(s, 14);
12dcc321 9057 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9058 tcg_temp_free_i32(tmp);
81465888
PM
9059 tmp = load_cpu_field(spsr);
9060 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 9061 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9062 tcg_temp_free_i32(tmp);
81465888
PM
9063 if (writeback) {
9064 switch (amode) {
9065 case 0:
9066 offset = -8;
9067 break;
9068 case 1:
9069 offset = 4;
9070 break;
9071 case 2:
9072 offset = -4;
9073 break;
9074 case 3:
9075 offset = 0;
9076 break;
9077 default:
9078 abort();
9079 }
9080 tcg_gen_addi_i32(addr, addr, offset);
9081 tmp = tcg_const_i32(mode);
9082 gen_helper_set_r13_banked(cpu_env, tmp, addr);
9083 tcg_temp_free_i32(tmp);
9084 }
9085 tcg_temp_free_i32(addr);
dcba3a8d 9086 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
9087}
9088
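/*
 * Hypothetical reference table, not used by gen_srs() above: the two
 * offset switches collected in one place. 'pre' is added to the banked
 * SP before LR is stored (the SPSR then goes at pre + 4); with
 * writeback, 'wb' is added after the second store, so the banked SP
 * always ends up 8 below or above its original value.
 */
static const struct { int32_t pre, wb; } srs_amode_offsets[4] = {
    [0] = { -4, -8 },   /* DA */
    [1] = {  0,  4 },   /* IA */
    [2] = { -8, -4 },   /* DB */
    [3] = {  4,  0 },   /* IB */
};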
c2d9644e
RK
9089/* Generate a label used for skipping this instruction */
9090static void arm_gen_condlabel(DisasContext *s)
9091{
9092 if (!s->condjmp) {
9093 s->condlabel = gen_new_label();
9094 s->condjmp = 1;
9095 }
9096}
9097
9098/* Skip this instruction if the ARM condition is false */
9099static void arm_skip_unless(DisasContext *s, uint32_t cond)
9100{
9101 arm_gen_condlabel(s);
9102 arm_gen_test_cc(cond ^ 1, s->condlabel);
9103}
9104
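/* Note the inversion above: arm_gen_test_cc(cond ^ 1, ...) branches to
 * s->condlabel when the opposite condition passes, i.e. exactly when
 * `cond` is false, so the code generated for the insn body is skipped.
 */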
f4df2210 9105static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 9106{
f4df2210 9107 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
9108 TCGv_i32 tmp;
9109 TCGv_i32 tmp2;
9110 TCGv_i32 tmp3;
9111 TCGv_i32 addr;
a7812ae4 9112 TCGv_i64 tmp64;
9ee6e8bb 9113
e13886e3
PM
9114 /* M variants do not implement ARM mode; this must raise the INVSTATE
9115 * UsageFault exception.
9116 */
b53d8923 9117 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
9118 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
9119 default_exception_el(s));
9120 return;
b53d8923 9121 }
9ee6e8bb
PB
9122 cond = insn >> 28;
9123 if (cond == 0xf){
be5e7a76
DES
9124 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
9125 * choose to UNDEF. In ARMv5 and above the space is used
9126 * for miscellaneous unconditional instructions.
9127 */
9128 ARCH(5);
9129
9ee6e8bb
PB
9130 /* Unconditional instructions. */
9131 if (((insn >> 25) & 7) == 1) {
9132 /* NEON Data processing. */
d614a513 9133 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9134 goto illegal_op;
d614a513 9135 }
9ee6e8bb 9136
7dcc1f89 9137 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9138 goto illegal_op;
7dcc1f89 9139 }
9ee6e8bb
PB
9140 return;
9141 }
9142 if ((insn & 0x0f100000) == 0x04000000) {
9143 /* NEON load/store. */
d614a513 9144 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9145 goto illegal_op;
d614a513 9146 }
9ee6e8bb 9147
7dcc1f89 9148 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9149 goto illegal_op;
7dcc1f89 9150 }
9ee6e8bb
PB
9151 return;
9152 }
6a57f3eb
WN
9153 if ((insn & 0x0f000e10) == 0x0e000a00) {
9154 /* VFP. */
7dcc1f89 9155 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9156 goto illegal_op;
9157 }
9158 return;
9159 }
3d185e5d
PM
9160 if (((insn & 0x0f30f000) == 0x0510f000) ||
9161 ((insn & 0x0f30f010) == 0x0710f000)) {
9162 if ((insn & (1 << 22)) == 0) {
9163 /* PLDW; v7MP */
d614a513 9164 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9165 goto illegal_op;
9166 }
9167 }
9168 /* Otherwise PLD; v5TE+ */
be5e7a76 9169 ARCH(5TE);
3d185e5d
PM
9170 return;
9171 }
9172 if (((insn & 0x0f70f000) == 0x0450f000) ||
9173 ((insn & 0x0f70f010) == 0x0650f000)) {
9174 ARCH(7);
9175 return; /* PLI; V7 */
9176 }
9177 if (((insn & 0x0f700000) == 0x04100000) ||
9178 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9179 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9180 goto illegal_op;
9181 }
9182 return; /* v7MP: Unallocated memory hint: must NOP */
9183 }
9184
9185 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
9186 ARCH(6);
9187 /* setend */
9886ecdf
PB
9188 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9189 gen_helper_setend(cpu_env);
dcba3a8d 9190 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
9191 }
9192 return;
9193 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9194 switch ((insn >> 4) & 0xf) {
9195 case 1: /* clrex */
9196 ARCH(6K);
426f5abc 9197 gen_clrex(s);
9ee6e8bb
PB
9198 return;
9199 case 4: /* dsb */
9200 case 5: /* dmb */
9ee6e8bb 9201 ARCH(7);
61e4c432 9202 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9203 return;
6df99dec
SS
9204 case 6: /* isb */
9205 /* We need to break the TB after this insn to execute
9206 * self-modifying code correctly and also to take
9207 * any pending interrupts immediately.
9208 */
0b609cc1 9209 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 9210 return;
9ee6e8bb
PB
9211 default:
9212 goto illegal_op;
9213 }
9214 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9215 /* srs */
81465888
PM
9216 ARCH(6);
9217 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9218 return;
ea825eee 9219 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9220 /* rfe */
c67b6b71 9221 int32_t offset;
9ee6e8bb
PB
9222 if (IS_USER(s))
9223 goto illegal_op;
9224 ARCH(6);
9225 rn = (insn >> 16) & 0xf;
b0109805 9226 addr = load_reg(s, rn);
9ee6e8bb
PB
9227 i = (insn >> 23) & 3;
9228 switch (i) {
b0109805 9229 case 0: offset = -4; break; /* DA */
c67b6b71
FN
9230 case 1: offset = 0; break; /* IA */
9231 case 2: offset = -8; break; /* DB */
b0109805 9232 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
9233 default: abort();
9234 }
9235 if (offset)
b0109805
PB
9236 tcg_gen_addi_i32(addr, addr, offset);
9237 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9238 tmp = tcg_temp_new_i32();
12dcc321 9239 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9240 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9241 tmp2 = tcg_temp_new_i32();
12dcc321 9242 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9243 if (insn & (1 << 21)) {
9244 /* Base writeback. */
9245 switch (i) {
b0109805 9246 case 0: offset = -8; break;
c67b6b71
FN
9247 case 1: offset = 4; break;
9248 case 2: offset = -4; break;
b0109805 9249 case 3: offset = 0; break;
9ee6e8bb
PB
9250 default: abort();
9251 }
9252 if (offset)
b0109805
PB
9253 tcg_gen_addi_i32(addr, addr, offset);
9254 store_reg(s, rn, addr);
9255 } else {
7d1b0095 9256 tcg_temp_free_i32(addr);
9ee6e8bb 9257 }
b0109805 9258 gen_rfe(s, tmp, tmp2);
c67b6b71 9259 return;
9ee6e8bb
PB
9260 } else if ((insn & 0x0e000000) == 0x0a000000) {
9261 /* branch link and change to thumb (blx <offset>) */
9262 int32_t offset;
9263
9264 val = (uint32_t)s->pc;
7d1b0095 9265 tmp = tcg_temp_new_i32();
d9ba4830
PB
9266 tcg_gen_movi_i32(tmp, val);
9267 store_reg(s, 14, tmp);
9ee6e8bb
PB
9268 /* Sign-extend the 24-bit offset */
9269 offset = (((int32_t)insn) << 8) >> 8;
9270 /* offset * 4 + bit24 * 2 + (thumb bit) */
9271 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9272 /* pipeline offset */
9273 val += 4;
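            /* Worked example (assumed encoding): a BLX immediate at
             * address A with imm24 == 0 and H == 0 leaves
             * val == (A + 8) | 1, i.e. a Thumb-state branch to A + 8.
             */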
be5e7a76 9274 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9275 gen_bx_im(s, val);
9ee6e8bb
PB
9276 return;
9277 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9278 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9279 /* iWMMXt register transfer. */
c0f4af17 9280 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9281 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9282 return;
c0f4af17
PM
9283 }
9284 }
9ee6e8bb 9285 }
8b7209fa
RH
9286 } else if ((insn & 0x0e000a00) == 0x0c000800
9287 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9288 if (disas_neon_insn_3same_ext(s, insn)) {
9289 goto illegal_op;
9290 }
9291 return;
638808ff
RH
9292 } else if ((insn & 0x0f000a00) == 0x0e000800
9293 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9294 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9295 goto illegal_op;
9296 }
9297 return;
9ee6e8bb
PB
9298 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9299 /* Coprocessor double register transfer. */
be5e7a76 9300 ARCH(5TE);
9ee6e8bb
PB
9301 } else if ((insn & 0x0f000010) == 0x0e000010) {
9302 /* Additional coprocessor register transfer. */
7997d92f 9303 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
9304 uint32_t mask;
9305 uint32_t val;
9306 /* cps (privileged) */
9307 if (IS_USER(s))
9308 return;
9309 mask = val = 0;
9310 if (insn & (1 << 19)) {
9311 if (insn & (1 << 8))
9312 mask |= CPSR_A;
9313 if (insn & (1 << 7))
9314 mask |= CPSR_I;
9315 if (insn & (1 << 6))
9316 mask |= CPSR_F;
9317 if (insn & (1 << 18))
9318 val |= mask;
9319 }
7997d92f 9320 if (insn & (1 << 17)) {
9ee6e8bb
PB
9321 mask |= CPSR_M;
9322 val |= (insn & 0x1f);
9323 }
9324 if (mask) {
2fbac54b 9325 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
9326 }
9327 return;
9328 }
9329 goto illegal_op;
9330 }
9331 if (cond != 0xe) {
9332 /* if not always execute, we generate a conditional jump to
9333 next instruction */
c2d9644e 9334 arm_skip_unless(s, cond);
9ee6e8bb
PB
9335 }
9336 if ((insn & 0x0f900000) == 0x03000000) {
9337 if ((insn & (1 << 21)) == 0) {
9338 ARCH(6T2);
9339 rd = (insn >> 12) & 0xf;
9340 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9341 if ((insn & (1 << 22)) == 0) {
9342 /* MOVW */
7d1b0095 9343 tmp = tcg_temp_new_i32();
5e3f878a 9344 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
9345 } else {
9346 /* MOVT */
5e3f878a 9347 tmp = load_reg(s, rd);
86831435 9348 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9349 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 9350 }
5e3f878a 9351 store_reg(s, rd, tmp);
9ee6e8bb
PB
9352 } else {
9353 if (((insn >> 12) & 0xf) != 0xf)
9354 goto illegal_op;
9355 if (((insn >> 16) & 0xf) == 0) {
9356 gen_nop_hint(s, insn & 0xff);
9357 } else {
9358 /* CPSR = immediate */
9359 val = insn & 0xff;
9360 shift = ((insn >> 8) & 0xf) * 2;
9361 if (shift)
9362 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 9363 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
9364 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9365 i, val)) {
9ee6e8bb 9366 goto illegal_op;
7dcc1f89 9367 }
9ee6e8bb
PB
9368 }
9369 }
9370 } else if ((insn & 0x0f900000) == 0x01000000
9371 && (insn & 0x00000090) != 0x00000090) {
9372 /* miscellaneous instructions */
9373 op1 = (insn >> 21) & 3;
9374 sh = (insn >> 4) & 0xf;
9375 rm = insn & 0xf;
9376 switch (sh) {
8bfd0550
PM
9377 case 0x0: /* MSR, MRS */
9378 if (insn & (1 << 9)) {
9379 /* MSR (banked) and MRS (banked) */
9380 int sysm = extract32(insn, 16, 4) |
9381 (extract32(insn, 8, 1) << 4);
9382 int r = extract32(insn, 22, 1);
9383
9384 if (op1 & 1) {
9385 /* MSR (banked) */
9386 gen_msr_banked(s, r, sysm, rm);
9387 } else {
9388 /* MRS (banked) */
9389 int rd = extract32(insn, 12, 4);
9390
9391 gen_mrs_banked(s, r, sysm, rd);
9392 }
9393 break;
9394 }
9395
9396 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
9397 if (op1 & 1) {
9398 /* PSR = reg */
2fbac54b 9399 tmp = load_reg(s, rm);
9ee6e8bb 9400 i = ((op1 & 2) != 0);
7dcc1f89 9401 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
9402 goto illegal_op;
9403 } else {
9404 /* reg = PSR */
9405 rd = (insn >> 12) & 0xf;
9406 if (op1 & 2) {
9407 if (IS_USER(s))
9408 goto illegal_op;
d9ba4830 9409 tmp = load_cpu_field(spsr);
9ee6e8bb 9410 } else {
7d1b0095 9411 tmp = tcg_temp_new_i32();
9ef39277 9412 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9413 }
d9ba4830 9414 store_reg(s, rd, tmp);
9ee6e8bb
PB
9415 }
9416 break;
9417 case 0x1:
9418 if (op1 == 1) {
9419 /* branch/exchange thumb (bx). */
be5e7a76 9420 ARCH(4T);
d9ba4830
PB
9421 tmp = load_reg(s, rm);
9422 gen_bx(s, tmp);
9ee6e8bb
PB
9423 } else if (op1 == 3) {
9424 /* clz */
be5e7a76 9425 ARCH(5);
9ee6e8bb 9426 rd = (insn >> 12) & 0xf;
1497c961 9427 tmp = load_reg(s, rm);
7539a012 9428 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 9429 store_reg(s, rd, tmp);
9ee6e8bb
PB
9430 } else {
9431 goto illegal_op;
9432 }
9433 break;
9434 case 0x2:
9435 if (op1 == 1) {
9436 ARCH(5J); /* bxj */
9437 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9438 tmp = load_reg(s, rm);
9439 gen_bx(s, tmp);
9ee6e8bb
PB
9440 } else {
9441 goto illegal_op;
9442 }
9443 break;
9444 case 0x3:
9445 if (op1 != 1)
9446 goto illegal_op;
9447
be5e7a76 9448 ARCH(5);
9ee6e8bb 9449 /* branch link/exchange thumb (blx) */
d9ba4830 9450 tmp = load_reg(s, rm);
7d1b0095 9451 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
9452 tcg_gen_movi_i32(tmp2, s->pc);
9453 store_reg(s, 14, tmp2);
9454 gen_bx(s, tmp);
9ee6e8bb 9455 break;
eb0ecd5a
WN
9456 case 0x4:
9457 {
9458 /* crc32/crc32c */
9459 uint32_t c = extract32(insn, 8, 4);
9460
 9461                 /* Check that this CPU supports ARMv8 CRC instructions.
 9462                  * op1 == 3 is UNPREDICTABLE, but we handle it as UNDEFINED.
9463 * Bits 8, 10 and 11 should be zero.
9464 */
962fcbf2 9465 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
9466 goto illegal_op;
9467 }
9468
9469 rn = extract32(insn, 16, 4);
9470 rd = extract32(insn, 12, 4);
9471
9472 tmp = load_reg(s, rn);
9473 tmp2 = load_reg(s, rm);
aa633469
PM
9474 if (op1 == 0) {
9475 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9476 } else if (op1 == 1) {
9477 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9478 }
eb0ecd5a
WN
9479 tmp3 = tcg_const_i32(1 << op1);
9480 if (c & 0x2) {
9481 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9482 } else {
9483 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9484 }
9485 tcg_temp_free_i32(tmp2);
9486 tcg_temp_free_i32(tmp3);
9487 store_reg(s, rd, tmp);
9488 break;
9489 }
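        /* Decode summary for the case above, restated for reference:
         * op1 selects the input width of 1 << op1 bytes (CRC32B/H/W for
         * op1 = 0/1/2) and bit 1 of 'c' selects the Castagnoli variant
         * (CRC32CB/CH/CW).
         */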
9ee6e8bb 9490 case 0x5: /* saturating add/subtract */
be5e7a76 9491 ARCH(5TE);
9ee6e8bb
PB
9492 rd = (insn >> 12) & 0xf;
9493 rn = (insn >> 16) & 0xf;
b40d0353 9494 tmp = load_reg(s, rm);
5e3f878a 9495 tmp2 = load_reg(s, rn);
9ee6e8bb 9496 if (op1 & 2)
9ef39277 9497 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 9498 if (op1 & 1)
9ef39277 9499 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9500 else
9ef39277 9501 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9502 tcg_temp_free_i32(tmp2);
5e3f878a 9503 store_reg(s, rd, tmp);
9ee6e8bb 9504 break;
55c544ed
PM
9505 case 0x6: /* ERET */
9506 if (op1 != 3) {
9507 goto illegal_op;
9508 }
9509 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9510 goto illegal_op;
9511 }
9512 if ((insn & 0x000fff0f) != 0x0000000e) {
9513 /* UNPREDICTABLE; we choose to UNDEF */
9514 goto illegal_op;
9515 }
9516
9517 if (s->current_el == 2) {
9518 tmp = load_cpu_field(elr_el[2]);
9519 } else {
9520 tmp = load_reg(s, 14);
9521 }
9522 gen_exception_return(s, tmp);
9523 break;
49e14940 9524 case 7:
d4a2dc67
PM
9525 {
9526 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 9527 switch (op1) {
19a6e31c
PM
9528 case 0:
9529 /* HLT */
9530 gen_hlt(s, imm16);
9531 break;
37e6456e
PM
9532 case 1:
9533 /* bkpt */
9534 ARCH(5);
c900a2e6 9535 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
9536 break;
9537 case 2:
9538 /* Hypervisor call (v7) */
9539 ARCH(7);
9540 if (IS_USER(s)) {
9541 goto illegal_op;
9542 }
9543 gen_hvc(s, imm16);
9544 break;
9545 case 3:
9546 /* Secure monitor call (v6+) */
9547 ARCH(6K);
9548 if (IS_USER(s)) {
9549 goto illegal_op;
9550 }
9551 gen_smc(s);
9552 break;
9553 default:
19a6e31c 9554 g_assert_not_reached();
49e14940 9555 }
9ee6e8bb 9556 break;
d4a2dc67 9557 }
9ee6e8bb
PB
9558 case 0x8: /* signed multiply */
9559 case 0xa:
9560 case 0xc:
9561 case 0xe:
be5e7a76 9562 ARCH(5TE);
9ee6e8bb
PB
9563 rs = (insn >> 8) & 0xf;
9564 rn = (insn >> 12) & 0xf;
9565 rd = (insn >> 16) & 0xf;
9566 if (op1 == 1) {
9567 /* (32 * 16) >> 16 */
5e3f878a
PB
9568 tmp = load_reg(s, rm);
9569 tmp2 = load_reg(s, rs);
9ee6e8bb 9570 if (sh & 4)
5e3f878a 9571 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9572 else
5e3f878a 9573 gen_sxth(tmp2);
a7812ae4
PB
9574 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9575 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9576 tmp = tcg_temp_new_i32();
ecc7b3aa 9577 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9578 tcg_temp_free_i64(tmp64);
9ee6e8bb 9579 if ((sh & 2) == 0) {
5e3f878a 9580 tmp2 = load_reg(s, rn);
9ef39277 9581 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9582 tcg_temp_free_i32(tmp2);
9ee6e8bb 9583 }
5e3f878a 9584 store_reg(s, rd, tmp);
9ee6e8bb
PB
9585 } else {
9586 /* 16 * 16 */
5e3f878a
PB
9587 tmp = load_reg(s, rm);
9588 tmp2 = load_reg(s, rs);
9589 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9590 tcg_temp_free_i32(tmp2);
9ee6e8bb 9591 if (op1 == 2) {
a7812ae4
PB
9592 tmp64 = tcg_temp_new_i64();
9593 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9594 tcg_temp_free_i32(tmp);
a7812ae4
PB
9595 gen_addq(s, tmp64, rn, rd);
9596 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9597 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9598 } else {
9599 if (op1 == 0) {
5e3f878a 9600 tmp2 = load_reg(s, rn);
9ef39277 9601 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9602 tcg_temp_free_i32(tmp2);
9ee6e8bb 9603 }
5e3f878a 9604 store_reg(s, rd, tmp);
9ee6e8bb
PB
9605 }
9606 }
9607 break;
9608 default:
9609 goto illegal_op;
9610 }
9611 } else if (((insn & 0x0e000000) == 0 &&
9612 (insn & 0x00000090) != 0x90) ||
9613 ((insn & 0x0e000000) == (1 << 25))) {
9614 int set_cc, logic_cc, shiftop;
9615
9616 op1 = (insn >> 21) & 0xf;
9617 set_cc = (insn >> 20) & 1;
9618 logic_cc = table_logic_cc[op1] & set_cc;
9619
9620 /* data processing instruction */
9621 if (insn & (1 << 25)) {
9622 /* immediate operand */
9623 val = insn & 0xff;
9624 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 9625 if (shift) {
9ee6e8bb 9626 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9627 }
7d1b0095 9628 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9629 tcg_gen_movi_i32(tmp2, val);
9630 if (logic_cc && shift) {
9631 gen_set_CF_bit31(tmp2);
9632 }
9ee6e8bb
PB
9633 } else {
9634 /* register */
9635 rm = (insn) & 0xf;
e9bb4aa9 9636 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9637 shiftop = (insn >> 5) & 3;
9638 if (!(insn & (1 << 4))) {
9639 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9640 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9641 } else {
9642 rs = (insn >> 8) & 0xf;
8984bd2e 9643 tmp = load_reg(s, rs);
e9bb4aa9 9644 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9645 }
9646 }
9647 if (op1 != 0x0f && op1 != 0x0d) {
9648 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9649 tmp = load_reg(s, rn);
9650 } else {
f764718d 9651 tmp = NULL;
9ee6e8bb
PB
9652 }
9653 rd = (insn >> 12) & 0xf;
9654 switch(op1) {
9655 case 0x00:
e9bb4aa9
JR
9656 tcg_gen_and_i32(tmp, tmp, tmp2);
9657 if (logic_cc) {
9658 gen_logic_CC(tmp);
9659 }
7dcc1f89 9660 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9661 break;
9662 case 0x01:
e9bb4aa9
JR
9663 tcg_gen_xor_i32(tmp, tmp, tmp2);
9664 if (logic_cc) {
9665 gen_logic_CC(tmp);
9666 }
7dcc1f89 9667 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9668 break;
9669 case 0x02:
9670 if (set_cc && rd == 15) {
9671 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9672 if (IS_USER(s)) {
9ee6e8bb 9673 goto illegal_op;
e9bb4aa9 9674 }
72485ec4 9675 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9676 gen_exception_return(s, tmp);
9ee6e8bb 9677 } else {
e9bb4aa9 9678 if (set_cc) {
72485ec4 9679 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9680 } else {
9681 tcg_gen_sub_i32(tmp, tmp, tmp2);
9682 }
7dcc1f89 9683 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9684 }
9685 break;
9686 case 0x03:
e9bb4aa9 9687 if (set_cc) {
72485ec4 9688 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9689 } else {
9690 tcg_gen_sub_i32(tmp, tmp2, tmp);
9691 }
7dcc1f89 9692 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9693 break;
9694 case 0x04:
e9bb4aa9 9695 if (set_cc) {
72485ec4 9696 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9697 } else {
9698 tcg_gen_add_i32(tmp, tmp, tmp2);
9699 }
7dcc1f89 9700 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9701 break;
9702 case 0x05:
e9bb4aa9 9703 if (set_cc) {
49b4c31e 9704 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9705 } else {
9706 gen_add_carry(tmp, tmp, tmp2);
9707 }
7dcc1f89 9708 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9709 break;
9710 case 0x06:
e9bb4aa9 9711 if (set_cc) {
2de68a49 9712 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9713 } else {
9714 gen_sub_carry(tmp, tmp, tmp2);
9715 }
7dcc1f89 9716 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9717 break;
9718 case 0x07:
e9bb4aa9 9719 if (set_cc) {
2de68a49 9720 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9721 } else {
9722 gen_sub_carry(tmp, tmp2, tmp);
9723 }
7dcc1f89 9724 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9725 break;
9726 case 0x08:
9727 if (set_cc) {
e9bb4aa9
JR
9728 tcg_gen_and_i32(tmp, tmp, tmp2);
9729 gen_logic_CC(tmp);
9ee6e8bb 9730 }
7d1b0095 9731 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9732 break;
9733 case 0x09:
9734 if (set_cc) {
e9bb4aa9
JR
9735 tcg_gen_xor_i32(tmp, tmp, tmp2);
9736 gen_logic_CC(tmp);
9ee6e8bb 9737 }
7d1b0095 9738 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9739 break;
9740 case 0x0a:
9741 if (set_cc) {
72485ec4 9742 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9743 }
7d1b0095 9744 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9745 break;
9746 case 0x0b:
9747 if (set_cc) {
72485ec4 9748 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9749 }
7d1b0095 9750 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9751 break;
9752 case 0x0c:
e9bb4aa9
JR
9753 tcg_gen_or_i32(tmp, tmp, tmp2);
9754 if (logic_cc) {
9755 gen_logic_CC(tmp);
9756 }
7dcc1f89 9757 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9758 break;
9759 case 0x0d:
9760 if (logic_cc && rd == 15) {
9761 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9762 if (IS_USER(s)) {
9ee6e8bb 9763 goto illegal_op;
e9bb4aa9
JR
9764 }
9765 gen_exception_return(s, tmp2);
9ee6e8bb 9766 } else {
e9bb4aa9
JR
9767 if (logic_cc) {
9768 gen_logic_CC(tmp2);
9769 }
7dcc1f89 9770 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9771 }
9772 break;
9773 case 0x0e:
f669df27 9774 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9775 if (logic_cc) {
9776 gen_logic_CC(tmp);
9777 }
7dcc1f89 9778 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9779 break;
9780 default:
9781 case 0x0f:
e9bb4aa9
JR
9782 tcg_gen_not_i32(tmp2, tmp2);
9783 if (logic_cc) {
9784 gen_logic_CC(tmp2);
9785 }
7dcc1f89 9786 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9787 break;
9788 }
e9bb4aa9 9789 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9790 tcg_temp_free_i32(tmp2);
e9bb4aa9 9791 }
9ee6e8bb
PB
9792 } else {
9793 /* other instructions */
9794 op1 = (insn >> 24) & 0xf;
9795 switch(op1) {
9796 case 0x0:
9797 case 0x1:
9798 /* multiplies, extra load/stores */
9799 sh = (insn >> 5) & 3;
9800 if (sh == 0) {
9801 if (op1 == 0x0) {
9802 rd = (insn >> 16) & 0xf;
9803 rn = (insn >> 12) & 0xf;
9804 rs = (insn >> 8) & 0xf;
9805 rm = (insn) & 0xf;
9806 op1 = (insn >> 20) & 0xf;
9807 switch (op1) {
9808 case 0: case 1: case 2: case 3: case 6:
9809 /* 32 bit mul */
5e3f878a
PB
9810 tmp = load_reg(s, rs);
9811 tmp2 = load_reg(s, rm);
9812 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9813 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9814 if (insn & (1 << 22)) {
9815 /* Subtract (mls) */
9816 ARCH(6T2);
5e3f878a
PB
9817 tmp2 = load_reg(s, rn);
9818 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9819 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9820 } else if (insn & (1 << 21)) {
9821 /* Add */
5e3f878a
PB
9822 tmp2 = load_reg(s, rn);
9823 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9824 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9825 }
9826 if (insn & (1 << 20))
5e3f878a
PB
9827 gen_logic_CC(tmp);
9828 store_reg(s, rd, tmp);
9ee6e8bb 9829 break;
8aac08b1
AJ
9830 case 4:
9831 /* 64 bit mul double accumulate (UMAAL) */
9832 ARCH(6);
9833 tmp = load_reg(s, rs);
9834 tmp2 = load_reg(s, rm);
9835 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9836 gen_addq_lo(s, tmp64, rn);
9837 gen_addq_lo(s, tmp64, rd);
9838 gen_storeq_reg(s, rn, rd, tmp64);
9839 tcg_temp_free_i64(tmp64);
9840 break;
9841 case 8: case 9: case 10: case 11:
9842 case 12: case 13: case 14: case 15:
9843 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9844 tmp = load_reg(s, rs);
9845 tmp2 = load_reg(s, rm);
8aac08b1 9846 if (insn & (1 << 22)) {
c9f10124 9847 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9848 } else {
c9f10124 9849 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9850 }
9851 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9852 TCGv_i32 al = load_reg(s, rn);
9853 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9854 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9855 tcg_temp_free_i32(al);
9856 tcg_temp_free_i32(ah);
9ee6e8bb 9857 }
8aac08b1 9858 if (insn & (1 << 20)) {
c9f10124 9859 gen_logicq_cc(tmp, tmp2);
8aac08b1 9860 }
c9f10124
RH
9861 store_reg(s, rn, tmp);
9862 store_reg(s, rd, tmp2);
9ee6e8bb 9863 break;
8aac08b1
AJ
9864 default:
9865 goto illegal_op;
9ee6e8bb
PB
9866 }
9867 } else {
9868 rn = (insn >> 16) & 0xf;
9869 rd = (insn >> 12) & 0xf;
9870 if (insn & (1 << 23)) {
9871 /* load/store exclusive */
96c55295
PM
9872 bool is_ld = extract32(insn, 20, 1);
9873 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 9874 int op2 = (insn >> 8) & 3;
86753403 9875 op1 = (insn >> 21) & 0x3;
2359bf80
MR
9876
9877 switch (op2) {
9878 case 0: /* lda/stl */
9879 if (op1 == 1) {
9880 goto illegal_op;
9881 }
9882 ARCH(8);
9883 break;
9884 case 1: /* reserved */
9885 goto illegal_op;
9886 case 2: /* ldaex/stlex */
9887 ARCH(8);
9888 break;
9889 case 3: /* ldrex/strex */
9890 if (op1) {
9891 ARCH(6K);
9892 } else {
9893 ARCH(6);
9894 }
9895 break;
9896 }
9897
3174f8e9 9898 addr = tcg_temp_local_new_i32();
98a46317 9899 load_reg_var(s, addr, rn);
2359bf80 9900
96c55295
PM
9901 if (is_lasr && !is_ld) {
9902 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9903 }
9904
2359bf80 9905 if (op2 == 0) {
96c55295 9906 if (is_ld) {
2359bf80
MR
9907 tmp = tcg_temp_new_i32();
9908 switch (op1) {
9909 case 0: /* lda */
9bb6558a
PM
9910 gen_aa32_ld32u_iss(s, tmp, addr,
9911 get_mem_index(s),
9912 rd | ISSIsAcqRel);
2359bf80
MR
9913 break;
9914 case 2: /* ldab */
9bb6558a
PM
9915 gen_aa32_ld8u_iss(s, tmp, addr,
9916 get_mem_index(s),
9917 rd | ISSIsAcqRel);
2359bf80
MR
9918 break;
9919 case 3: /* ldah */
9bb6558a
PM
9920 gen_aa32_ld16u_iss(s, tmp, addr,
9921 get_mem_index(s),
9922 rd | ISSIsAcqRel);
2359bf80
MR
9923 break;
9924 default:
9925 abort();
9926 }
9927 store_reg(s, rd, tmp);
9928 } else {
9929 rm = insn & 0xf;
9930 tmp = load_reg(s, rm);
9931 switch (op1) {
9932 case 0: /* stl */
9bb6558a
PM
9933 gen_aa32_st32_iss(s, tmp, addr,
9934 get_mem_index(s),
9935 rm | ISSIsAcqRel);
2359bf80
MR
9936 break;
9937 case 2: /* stlb */
9bb6558a
PM
9938 gen_aa32_st8_iss(s, tmp, addr,
9939 get_mem_index(s),
9940 rm | ISSIsAcqRel);
2359bf80
MR
9941 break;
9942 case 3: /* stlh */
9bb6558a
PM
9943 gen_aa32_st16_iss(s, tmp, addr,
9944 get_mem_index(s),
9945 rm | ISSIsAcqRel);
2359bf80
MR
9946 break;
9947 default:
9948 abort();
9949 }
9950 tcg_temp_free_i32(tmp);
9951 }
96c55295 9952 } else if (is_ld) {
86753403
PB
9953 switch (op1) {
9954 case 0: /* ldrex */
426f5abc 9955 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9956 break;
9957 case 1: /* ldrexd */
426f5abc 9958 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9959 break;
9960 case 2: /* ldrexb */
426f5abc 9961 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9962 break;
9963 case 3: /* ldrexh */
426f5abc 9964 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9965 break;
9966 default:
9967 abort();
9968 }
9ee6e8bb
PB
9969 } else {
9970 rm = insn & 0xf;
86753403
PB
9971 switch (op1) {
9972 case 0: /* strex */
426f5abc 9973 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9974 break;
9975 case 1: /* strexd */
502e64fe 9976 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9977 break;
9978 case 2: /* strexb */
426f5abc 9979 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9980 break;
9981 case 3: /* strexh */
426f5abc 9982 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9983 break;
9984 default:
9985 abort();
9986 }
9ee6e8bb 9987 }
39d5492a 9988 tcg_temp_free_i32(addr);
96c55295
PM
9989
9990 if (is_lasr && is_ld) {
9991 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9992 }
c4869ca6
OS
9993 } else if ((insn & 0x00300f00) == 0) {
9994 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9995 * - SWP, SWPB
9996 */
9997
cf12bce0
EC
9998 TCGv taddr;
9999 TCGMemOp opc = s->be_data;
10000
9ee6e8bb
PB
10001 rm = (insn) & 0xf;
10002
9ee6e8bb 10003 if (insn & (1 << 22)) {
cf12bce0 10004 opc |= MO_UB;
9ee6e8bb 10005 } else {
cf12bce0 10006 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 10007 }
cf12bce0
EC
10008
10009 addr = load_reg(s, rn);
10010 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 10011 tcg_temp_free_i32(addr);
cf12bce0
EC
10012
10013 tmp = load_reg(s, rm);
10014 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
10015 get_mem_index(s), opc);
10016 tcg_temp_free(taddr);
10017 store_reg(s, rd, tmp);
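                /* Sketch: architecturally SWP{B} is an atomic exchange,
                 * roughly Rd = __atomic_exchange_n(addr, Rm, SEQ_CST)
                 * at byte or word width (insn bit 22), which is what
                 * the tcg_gen_atomic_xchg_i32 above implements.
                 */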
c4869ca6
OS
10018 } else {
10019 goto illegal_op;
9ee6e8bb
PB
10020 }
10021 }
10022 } else {
10023 int address_offset;
3960c336 10024 bool load = insn & (1 << 20);
63f26fcf
PM
10025 bool wbit = insn & (1 << 21);
10026 bool pbit = insn & (1 << 24);
3960c336 10027 bool doubleword = false;
9bb6558a
PM
10028 ISSInfo issinfo;
10029
9ee6e8bb
PB
10030 /* Misc load/store */
10031 rn = (insn >> 16) & 0xf;
10032 rd = (insn >> 12) & 0xf;
3960c336 10033
9bb6558a
PM
10034 /* ISS not valid if writeback */
10035                 issinfo = (pbit && !wbit) ? rd : ISSInvalid;
10036
3960c336
PM
10037 if (!load && (sh & 2)) {
10038 /* doubleword */
10039 ARCH(5TE);
10040 if (rd & 1) {
10041 /* UNPREDICTABLE; we choose to UNDEF */
10042 goto illegal_op;
10043 }
10044 load = (sh & 1) == 0;
10045 doubleword = true;
10046 }
10047
b0109805 10048 addr = load_reg(s, rn);
63f26fcf 10049 if (pbit) {
b0109805 10050 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 10051 }
9ee6e8bb 10052 address_offset = 0;
3960c336
PM
10053
10054 if (doubleword) {
10055 if (!load) {
9ee6e8bb 10056 /* store */
b0109805 10057 tmp = load_reg(s, rd);
12dcc321 10058 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10059 tcg_temp_free_i32(tmp);
b0109805
PB
10060 tcg_gen_addi_i32(addr, addr, 4);
10061 tmp = load_reg(s, rd + 1);
12dcc321 10062 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10063 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10064 } else {
10065 /* load */
5a839c0d 10066 tmp = tcg_temp_new_i32();
12dcc321 10067 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10068 store_reg(s, rd, tmp);
10069 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 10070 tmp = tcg_temp_new_i32();
12dcc321 10071 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10072 rd++;
9ee6e8bb
PB
10073 }
10074 address_offset = -4;
3960c336
PM
10075 } else if (load) {
10076 /* load */
10077 tmp = tcg_temp_new_i32();
10078 switch (sh) {
10079 case 1:
9bb6558a
PM
10080 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10081 issinfo);
3960c336
PM
10082 break;
10083 case 2:
9bb6558a
PM
10084 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
10085 issinfo);
3960c336
PM
10086 break;
10087 default:
10088 case 3:
9bb6558a
PM
10089 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
10090 issinfo);
3960c336
PM
10091 break;
10092 }
9ee6e8bb
PB
10093 } else {
10094 /* store */
b0109805 10095 tmp = load_reg(s, rd);
9bb6558a 10096 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 10097 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10098 }
10099                 /* Perform base writeback before storing the loaded value to
10100 ensure correct behavior with overlapping index registers.
b6af0975 10101 ldrd with base writeback is undefined if the
9ee6e8bb 10102 destination and index registers overlap. */
63f26fcf 10103 if (!pbit) {
b0109805
PB
10104 gen_add_datah_offset(s, insn, address_offset, addr);
10105 store_reg(s, rn, addr);
63f26fcf 10106 } else if (wbit) {
9ee6e8bb 10107 if (address_offset)
b0109805
PB
10108 tcg_gen_addi_i32(addr, addr, address_offset);
10109 store_reg(s, rn, addr);
10110 } else {
7d1b0095 10111 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10112 }
10113 if (load) {
10114 /* Complete the load. */
b0109805 10115 store_reg(s, rd, tmp);
9ee6e8bb
PB
10116 }
10117 }
10118 break;
10119 case 0x4:
10120 case 0x5:
10121 goto do_ldst;
10122 case 0x6:
10123 case 0x7:
10124 if (insn & (1 << 4)) {
10125 ARCH(6);
10126 /* Armv6 Media instructions. */
10127 rm = insn & 0xf;
10128 rn = (insn >> 16) & 0xf;
2c0262af 10129 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
10130 rs = (insn >> 8) & 0xf;
10131 switch ((insn >> 23) & 3) {
10132 case 0: /* Parallel add/subtract. */
10133 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
10134 tmp = load_reg(s, rn);
10135 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10136 sh = (insn >> 5) & 7;
10137 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10138 goto illegal_op;
6ddbc6e4 10139 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 10140 tcg_temp_free_i32(tmp2);
6ddbc6e4 10141 store_reg(s, rd, tmp);
9ee6e8bb
PB
10142 break;
10143 case 1:
10144 if ((insn & 0x00700020) == 0) {
6c95676b 10145 /* Halfword pack. */
3670669c
PB
10146 tmp = load_reg(s, rn);
10147 tmp2 = load_reg(s, rm);
9ee6e8bb 10148 shift = (insn >> 7) & 0x1f;
3670669c
PB
10149 if (insn & (1 << 6)) {
10150 /* pkhtb */
22478e79
AZ
10151 if (shift == 0)
10152 shift = 31;
10153 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 10154 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 10155 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
10156 } else {
10157 /* pkhbt */
22478e79
AZ
10158 if (shift)
10159 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 10160 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
10161 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10162 }
10163 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10164 tcg_temp_free_i32(tmp2);
3670669c 10165 store_reg(s, rd, tmp);
9ee6e8bb
PB
10166 } else if ((insn & 0x00200020) == 0x00200000) {
10167 /* [us]sat */
6ddbc6e4 10168 tmp = load_reg(s, rm);
9ee6e8bb
PB
10169 shift = (insn >> 7) & 0x1f;
10170 if (insn & (1 << 6)) {
10171 if (shift == 0)
10172 shift = 31;
6ddbc6e4 10173 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10174 } else {
6ddbc6e4 10175 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
10176 }
10177 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10178 tmp2 = tcg_const_i32(sh);
10179 if (insn & (1 << 22))
9ef39277 10180 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 10181 else
9ef39277 10182 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 10183 tcg_temp_free_i32(tmp2);
6ddbc6e4 10184 store_reg(s, rd, tmp);
9ee6e8bb
PB
10185 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10186 /* [us]sat16 */
6ddbc6e4 10187 tmp = load_reg(s, rm);
9ee6e8bb 10188 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10189 tmp2 = tcg_const_i32(sh);
10190 if (insn & (1 << 22))
9ef39277 10191 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10192 else
9ef39277 10193 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10194 tcg_temp_free_i32(tmp2);
6ddbc6e4 10195 store_reg(s, rd, tmp);
9ee6e8bb
PB
10196 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10197 /* Select bytes. */
6ddbc6e4
PB
10198 tmp = load_reg(s, rn);
10199 tmp2 = load_reg(s, rm);
7d1b0095 10200 tmp3 = tcg_temp_new_i32();
0ecb72a5 10201 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 10202 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10203 tcg_temp_free_i32(tmp3);
10204 tcg_temp_free_i32(tmp2);
6ddbc6e4 10205 store_reg(s, rd, tmp);
9ee6e8bb 10206 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 10207 tmp = load_reg(s, rm);
9ee6e8bb 10208 shift = (insn >> 10) & 3;
1301f322 10209 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10210                            rotate; a shift is sufficient. */
10211 if (shift != 0)
f669df27 10212 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10213 op1 = (insn >> 20) & 7;
10214 switch (op1) {
5e3f878a
PB
10215 case 0: gen_sxtb16(tmp); break;
10216 case 2: gen_sxtb(tmp); break;
10217 case 3: gen_sxth(tmp); break;
10218 case 4: gen_uxtb16(tmp); break;
10219 case 6: gen_uxtb(tmp); break;
10220 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
10221 default: goto illegal_op;
10222 }
10223 if (rn != 15) {
5e3f878a 10224 tmp2 = load_reg(s, rn);
9ee6e8bb 10225 if ((op1 & 3) == 0) {
5e3f878a 10226 gen_add16(tmp, tmp2);
9ee6e8bb 10227 } else {
5e3f878a 10228 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10229 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10230 }
10231 }
6c95676b 10232 store_reg(s, rd, tmp);
9ee6e8bb
PB
10233 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10234 /* rev */
b0109805 10235 tmp = load_reg(s, rm);
9ee6e8bb
PB
10236 if (insn & (1 << 22)) {
10237 if (insn & (1 << 7)) {
b0109805 10238 gen_revsh(tmp);
9ee6e8bb
PB
10239 } else {
10240 ARCH(6T2);
b0109805 10241 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10242 }
10243 } else {
10244 if (insn & (1 << 7))
b0109805 10245 gen_rev16(tmp);
9ee6e8bb 10246 else
66896cb8 10247 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 10248 }
b0109805 10249 store_reg(s, rd, tmp);
9ee6e8bb
PB
10250 } else {
10251 goto illegal_op;
10252 }
10253 break;
10254 case 2: /* Multiplies (Type 3). */
41e9564d
PM
10255 switch ((insn >> 20) & 0x7) {
10256 case 5:
10257 if (((insn >> 6) ^ (insn >> 7)) & 1) {
10258 /* op2 not 00x or 11x : UNDEF */
10259 goto illegal_op;
10260 }
838fa72d
AJ
10261 /* Signed multiply most significant [accumulate].
10262 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
10263 tmp = load_reg(s, rm);
10264 tmp2 = load_reg(s, rs);
a7812ae4 10265 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 10266
955a7dd5 10267 if (rd != 15) {
838fa72d 10268 tmp = load_reg(s, rd);
9ee6e8bb 10269 if (insn & (1 << 6)) {
838fa72d 10270 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 10271 } else {
838fa72d 10272 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
10273 }
10274 }
838fa72d
AJ
10275 if (insn & (1 << 5)) {
10276 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10277 }
10278 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10279 tmp = tcg_temp_new_i32();
ecc7b3aa 10280 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10281 tcg_temp_free_i64(tmp64);
955a7dd5 10282 store_reg(s, rn, tmp);
41e9564d
PM
10283 break;
10284 case 0:
10285 case 4:
10286 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10287 if (insn & (1 << 7)) {
10288 goto illegal_op;
10289 }
10290 tmp = load_reg(s, rm);
10291 tmp2 = load_reg(s, rs);
9ee6e8bb 10292 if (insn & (1 << 5))
5e3f878a
PB
10293 gen_swap_half(tmp2);
10294 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10295 if (insn & (1 << 22)) {
5e3f878a 10296 /* smlald, smlsld */
33bbd75a
PC
10297 TCGv_i64 tmp64_2;
10298
a7812ae4 10299 tmp64 = tcg_temp_new_i64();
33bbd75a 10300 tmp64_2 = tcg_temp_new_i64();
a7812ae4 10301 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 10302 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 10303 tcg_temp_free_i32(tmp);
33bbd75a
PC
10304 tcg_temp_free_i32(tmp2);
10305 if (insn & (1 << 6)) {
10306 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10307 } else {
10308 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10309 }
10310 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
10311 gen_addq(s, tmp64, rd, rn);
10312 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 10313 tcg_temp_free_i64(tmp64);
9ee6e8bb 10314 } else {
5e3f878a 10315 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
10316 if (insn & (1 << 6)) {
10317 /* This subtraction cannot overflow. */
10318 tcg_gen_sub_i32(tmp, tmp, tmp2);
10319 } else {
10320 /* This addition cannot overflow 32 bits;
10321 * however it may overflow considered as a
10322 * signed operation, in which case we must set
10323 * the Q flag.
10324 */
10325 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10326 }
10327 tcg_temp_free_i32(tmp2);
22478e79 10328 if (rd != 15)
9ee6e8bb 10329 {
22478e79 10330 tmp2 = load_reg(s, rd);
9ef39277 10331 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10332 tcg_temp_free_i32(tmp2);
9ee6e8bb 10333 }
22478e79 10334 store_reg(s, rn, tmp);
9ee6e8bb 10335 }
41e9564d 10336 break;
b8b8ea05
PM
10337 case 1:
10338 case 3:
10339 /* SDIV, UDIV */
7e0cf8b4 10340 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
10341 goto illegal_op;
10342 }
10343 if (((insn >> 5) & 7) || (rd != 15)) {
10344 goto illegal_op;
10345 }
10346 tmp = load_reg(s, rm);
10347 tmp2 = load_reg(s, rs);
10348 if (insn & (1 << 21)) {
10349 gen_helper_udiv(tmp, tmp, tmp2);
10350 } else {
10351 gen_helper_sdiv(tmp, tmp, tmp2);
10352 }
10353 tcg_temp_free_i32(tmp2);
10354 store_reg(s, rn, tmp);
10355 break;
41e9564d
PM
10356 default:
10357 goto illegal_op;
9ee6e8bb
PB
10358 }
10359 break;
10360 case 3:
10361 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10362 switch (op1) {
10363 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
10364 ARCH(6);
10365 tmp = load_reg(s, rm);
10366 tmp2 = load_reg(s, rs);
10367 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10368 tcg_temp_free_i32(tmp2);
ded9d295
AZ
10369 if (rd != 15) {
10370 tmp2 = load_reg(s, rd);
6ddbc6e4 10371 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10372 tcg_temp_free_i32(tmp2);
9ee6e8bb 10373 }
ded9d295 10374 store_reg(s, rn, tmp);
9ee6e8bb
PB
10375 break;
10376 case 0x20: case 0x24: case 0x28: case 0x2c:
10377 /* Bitfield insert/clear. */
10378 ARCH(6T2);
10379 shift = (insn >> 7) & 0x1f;
10380 i = (insn >> 16) & 0x1f;
45140a57
KB
10381 if (i < shift) {
10382 /* UNPREDICTABLE; we choose to UNDEF */
10383 goto illegal_op;
10384 }
9ee6e8bb
PB
10385 i = i + 1 - shift;
10386 if (rm == 15) {
7d1b0095 10387 tmp = tcg_temp_new_i32();
5e3f878a 10388 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 10389 } else {
5e3f878a 10390 tmp = load_reg(s, rm);
9ee6e8bb
PB
10391 }
10392 if (i != 32) {
5e3f878a 10393 tmp2 = load_reg(s, rd);
d593c48e 10394 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 10395 tcg_temp_free_i32(tmp2);
9ee6e8bb 10396 }
5e3f878a 10397 store_reg(s, rd, tmp);
9ee6e8bb
PB
10398 break;
10399 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10400 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 10401 ARCH(6T2);
5e3f878a 10402 tmp = load_reg(s, rm);
9ee6e8bb
PB
10403 shift = (insn >> 7) & 0x1f;
10404 i = ((insn >> 16) & 0x1f) + 1;
10405 if (shift + i > 32)
10406 goto illegal_op;
10407 if (i < 32) {
10408 if (op1 & 0x20) {
59a71b4c 10409 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 10410 } else {
59a71b4c 10411 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
10412 }
10413 }
5e3f878a 10414 store_reg(s, rd, tmp);
9ee6e8bb
PB
10415 break;
10416 default:
10417 goto illegal_op;
10418 }
10419 break;
10420 }
10421 break;
10422 }
10423 do_ldst:
10424 /* Check for undefined extension instructions
10425          * per the ARM Bible, i.e.:
10426 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10427 */
10428 sh = (0xf << 20) | (0xf << 4);
10429 if (op1 == 0x7 && ((insn & sh) == sh))
10430 {
10431 goto illegal_op;
10432 }
10433 /* load/store byte/word */
10434 rn = (insn >> 16) & 0xf;
10435 rd = (insn >> 12) & 0xf;
b0109805 10436 tmp2 = load_reg(s, rn);
a99caa48
PM
10437 if ((insn & 0x01200000) == 0x00200000) {
10438 /* ldrt/strt */
579d21cc 10439 i = get_a32_user_mem_index(s);
a99caa48
PM
10440 } else {
10441 i = get_mem_index(s);
10442 }
9ee6e8bb 10443 if (insn & (1 << 24))
b0109805 10444 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
10445 if (insn & (1 << 20)) {
10446 /* load */
5a839c0d 10447 tmp = tcg_temp_new_i32();
9ee6e8bb 10448 if (insn & (1 << 22)) {
9bb6558a 10449 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10450 } else {
9bb6558a 10451 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10452 }
9ee6e8bb
PB
10453 } else {
10454 /* store */
b0109805 10455 tmp = load_reg(s, rd);
5a839c0d 10456 if (insn & (1 << 22)) {
9bb6558a 10457 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 10458 } else {
9bb6558a 10459 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
10460 }
10461 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10462 }
10463 if (!(insn & (1 << 24))) {
b0109805
PB
10464 gen_add_data_offset(s, insn, tmp2);
10465 store_reg(s, rn, tmp2);
10466 } else if (insn & (1 << 21)) {
10467 store_reg(s, rn, tmp2);
10468 } else {
7d1b0095 10469 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10470 }
10471 if (insn & (1 << 20)) {
10472 /* Complete the load. */
7dcc1f89 10473 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
10474 }
10475 break;
10476 case 0x08:
10477 case 0x09:
10478 {
da3e53dd
PM
10479 int j, n, loaded_base;
10480 bool exc_return = false;
10481 bool is_load = extract32(insn, 20, 1);
10482 bool user = false;
39d5492a 10483 TCGv_i32 loaded_var;
9ee6e8bb
PB
10484 /* load/store multiple words */
10485 /* XXX: store correct base if write back */
9ee6e8bb 10486 if (insn & (1 << 22)) {
da3e53dd 10487 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10488 if (IS_USER(s))
10489 goto illegal_op; /* only usable in supervisor mode */
10490
da3e53dd
PM
10491 if (is_load && extract32(insn, 15, 1)) {
10492 exc_return = true;
10493 } else {
10494 user = true;
10495 }
9ee6e8bb
PB
10496 }
10497 rn = (insn >> 16) & 0xf;
b0109805 10498 addr = load_reg(s, rn);
9ee6e8bb
PB
10499
10500 /* compute total size */
10501 loaded_base = 0;
f764718d 10502 loaded_var = NULL;
9ee6e8bb
PB
10503 n = 0;
10504             for (i = 0; i < 16; i++) {
10505 if (insn & (1 << i))
10506 n++;
10507 }
10508 /* XXX: test invalid n == 0 case ? */
10509 if (insn & (1 << 23)) {
10510 if (insn & (1 << 24)) {
10511 /* pre increment */
b0109805 10512 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10513 } else {
10514 /* post increment */
10515 }
10516 } else {
10517 if (insn & (1 << 24)) {
10518 /* pre decrement */
b0109805 10519 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10520 } else {
10521 /* post decrement */
10522 if (n != 1)
b0109805 10523 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10524 }
10525 }
10526 j = 0;
10527             for (i = 0; i < 16; i++) {
10528 if (insn & (1 << i)) {
da3e53dd 10529 if (is_load) {
9ee6e8bb 10530 /* load */
5a839c0d 10531 tmp = tcg_temp_new_i32();
12dcc321 10532 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10533 if (user) {
b75263d6 10534 tmp2 = tcg_const_i32(i);
1ce94f81 10535 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10536 tcg_temp_free_i32(tmp2);
7d1b0095 10537 tcg_temp_free_i32(tmp);
9ee6e8bb 10538 } else if (i == rn) {
b0109805 10539 loaded_var = tmp;
9ee6e8bb 10540 loaded_base = 1;
fb0e8e79
PM
10541 } else if (rn == 15 && exc_return) {
10542 store_pc_exc_ret(s, tmp);
9ee6e8bb 10543 } else {
7dcc1f89 10544 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
10545 }
10546 } else {
10547 /* store */
10548 if (i == 15) {
10549 /* special case: r15 = PC + 8 */
10550 val = (long)s->pc + 4;
7d1b0095 10551 tmp = tcg_temp_new_i32();
b0109805 10552 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 10553 } else if (user) {
7d1b0095 10554 tmp = tcg_temp_new_i32();
b75263d6 10555 tmp2 = tcg_const_i32(i);
9ef39277 10556 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10557 tcg_temp_free_i32(tmp2);
9ee6e8bb 10558 } else {
b0109805 10559 tmp = load_reg(s, i);
9ee6e8bb 10560 }
12dcc321 10561 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10562 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10563 }
10564 j++;
10565 /* no need to add after the last transfer */
10566 if (j != n)
b0109805 10567 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10568 }
10569 }
10570 if (insn & (1 << 21)) {
10571 /* write back */
10572 if (insn & (1 << 23)) {
10573 if (insn & (1 << 24)) {
10574 /* pre increment */
10575 } else {
10576 /* post increment */
b0109805 10577 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10578 }
10579 } else {
10580 if (insn & (1 << 24)) {
10581 /* pre decrement */
10582 if (n != 1)
b0109805 10583 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10584 } else {
10585 /* post decrement */
b0109805 10586 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10587 }
10588 }
b0109805
PB
10589 store_reg(s, rn, addr);
10590 } else {
7d1b0095 10591 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10592 }
10593 if (loaded_base) {
b0109805 10594 store_reg(s, rn, loaded_var);
9ee6e8bb 10595 }
da3e53dd 10596 if (exc_return) {
9ee6e8bb 10597 /* Restore CPSR from SPSR. */
d9ba4830 10598 tmp = load_cpu_field(spsr);
e69ad9df
AL
10599 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10600 gen_io_start();
10601 }
235ea1f5 10602 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10603 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10604 gen_io_end();
10605 }
7d1b0095 10606 tcg_temp_free_i32(tmp);
b29fd33d 10607 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10608 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10609 }
10610 }
10611 break;
10612 case 0xa:
10613 case 0xb:
10614 {
10615 int32_t offset;
10616
10617 /* branch (and link) */
10618 val = (int32_t)s->pc;
10619 if (insn & (1 << 24)) {
7d1b0095 10620 tmp = tcg_temp_new_i32();
5e3f878a
PB
10621 tcg_gen_movi_i32(tmp, val);
10622 store_reg(s, 14, tmp);
9ee6e8bb 10623 }
534df156
PM
10624 offset = sextract32(insn << 2, 0, 26);
10625 val += offset + 4;
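        /* Example (assumed): imm24 == 0 gives offset == 0, so a B at
         * address A branches to A + 8 (val starts as A + 4, plus the
         * 4-byte pipeline offset).
         */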
9ee6e8bb
PB
10626 gen_jmp(s, val);
10627 }
10628 break;
10629 case 0xc:
10630 case 0xd:
10631 case 0xe:
6a57f3eb
WN
10632 if (((insn >> 8) & 0xe) == 10) {
10633 /* VFP. */
7dcc1f89 10634 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10635 goto illegal_op;
10636 }
7dcc1f89 10637 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10638 /* Coprocessor. */
9ee6e8bb 10639 goto illegal_op;
6a57f3eb 10640 }
9ee6e8bb
PB
10641 break;
10642 case 0xf:
10643 /* swi */
eaed129d 10644 gen_set_pc_im(s, s->pc);
d4a2dc67 10645 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10646 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10647 break;
10648 default:
10649 illegal_op:
73710361
GB
10650 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10651 default_exception_el(s));
9ee6e8bb
PB
10652 break;
10653 }
10654 }
10655}
10656
296e5a0a
PM
10657static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10658{
10659 /* Return true if this is a 16 bit instruction. We must be precise
10660 * about this (matching the decode). We assume that s->pc still
10661 * points to the first 16 bits of the insn.
10662 */
10663 if ((insn >> 11) < 0x1d) {
10664 /* Definitely a 16-bit instruction */
10665 return true;
10666 }
10667
10668 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10669 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10670 * end up actually treating this as two 16-bit insns, though,
10671 * if it's half of a bl/blx pair that might span a page boundary.
10672 */
14120108
JS
10673 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10674 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10675 /* Thumb2 cores (including all M profile ones) always treat
10676 * 32-bit insns as 32-bit.
10677 */
10678 return false;
10679 }
10680
bfe7ad5b 10681 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10682 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10683 * is not on the next page; we merge this into a 32-bit
10684 * insn.
10685 */
10686 return false;
10687 }
10688 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10689 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10690 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10691 * -- handle as single 16 bit insn
10692 */
10693 return true;
10694}
10695
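/*
 * Illustrative sketch (hypothetical helper, assumed encodings): how the
 * rule above classifies a few first halfwords on a Thumb-2 core.
 */
static inline bool thumb_width_examples(void)
{
    uint32_t bx_lr = 0x4770; /* 0x4770 >> 11 == 0x08: 16-bit insn */
    uint32_t stmdb = 0xe92d; /* 0xe92d >> 11 == 0x1d: first half of 32-bit */
    uint32_t bl_hi = 0xf000; /* 0xf000 >> 11 == 0x1e: BL/BLX prefix, 32-bit */

    return (bx_lr >> 11) < 0x1d &&
           (stmdb >> 11) >= 0x1d &&
           (bl_hi >> 11) >= 0x1d;
}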
9ee6e8bb
PB
10696/* Return true if this is a Thumb-2 logical op. */
10697static int
10698thumb2_logic_op(int op)
10699{
10700 return (op < 8);
10701}
10702
10703/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10704 then set condition code flags based on the result of the operation.
10705 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10706 to the high bit of T1.
10707 Returns zero if the opcode is valid. */
10708
10709static int
39d5492a
PM
10710gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10711 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10712{
10713 int logic_cc;
10714
10715 logic_cc = 0;
10716 switch (op) {
10717 case 0: /* and */
396e467c 10718 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10719 logic_cc = conds;
10720 break;
10721 case 1: /* bic */
f669df27 10722 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10723 logic_cc = conds;
10724 break;
10725 case 2: /* orr */
396e467c 10726 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10727 logic_cc = conds;
10728 break;
10729 case 3: /* orn */
29501f1b 10730 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10731 logic_cc = conds;
10732 break;
10733 case 4: /* eor */
396e467c 10734 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10735 logic_cc = conds;
10736 break;
10737 case 8: /* add */
10738 if (conds)
72485ec4 10739 gen_add_CC(t0, t0, t1);
9ee6e8bb 10740 else
396e467c 10741 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10742 break;
10743 case 10: /* adc */
10744 if (conds)
49b4c31e 10745 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10746 else
396e467c 10747 gen_adc(t0, t1);
9ee6e8bb
PB
10748 break;
10749 case 11: /* sbc */
2de68a49
RH
10750 if (conds) {
10751 gen_sbc_CC(t0, t0, t1);
10752 } else {
396e467c 10753 gen_sub_carry(t0, t0, t1);
2de68a49 10754 }
9ee6e8bb
PB
10755 break;
10756 case 13: /* sub */
10757 if (conds)
72485ec4 10758 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10759 else
396e467c 10760 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10761 break;
10762 case 14: /* rsb */
10763 if (conds)
72485ec4 10764 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10765 else
396e467c 10766 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10767 break;
10768 default: /* 5, 6, 7, 9, 12, 15. */
10769 return 1;
10770 }
10771 if (logic_cc) {
396e467c 10772 gen_logic_CC(t0);
9ee6e8bb 10773 if (shifter_out)
396e467c 10774 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10775 }
10776 return 0;
10777}
10778
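/* Usage sketch (assumed operands): gen_thumb2_data_op(s, 8, 1, 0, t0, t1)
 * emits an ADDS-style t0 = t0 + t1 that updates NZCV via gen_add_CC; a
 * nonzero return means the op field is not a data-processing opcode and
 * the caller should treat the insn as UNDEF.
 */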
2eea841c
PM
10779/* Translate a 32-bit thumb instruction. */
10780static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10781{
296e5a0a 10782 uint32_t imm, shift, offset;
9ee6e8bb 10783 uint32_t rd, rn, rm, rs;
39d5492a
PM
10784 TCGv_i32 tmp;
10785 TCGv_i32 tmp2;
10786 TCGv_i32 tmp3;
10787 TCGv_i32 addr;
a7812ae4 10788 TCGv_i64 tmp64;
9ee6e8bb
PB
10789 int op;
10790 int shiftop;
10791 int conds;
10792 int logic_cc;
10793
14120108
JS
10794 /*
10795 * ARMv6-M supports a limited subset of Thumb2 instructions.
10796      * Other Thumb-1 architectures allow only the 32-bit
10797      * combined BL/BLX prefix and suffix.
296e5a0a 10798 */
14120108
JS
10799 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10800 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10801 int i;
10802 bool found = false;
8297cb13
JS
10803 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10804 0xf3b08040 /* dsb */,
10805 0xf3b08050 /* dmb */,
10806 0xf3b08060 /* isb */,
10807 0xf3e08000 /* mrs */,
10808 0xf000d000 /* bl */};
10809 static const uint32_t armv6m_mask[] = {0xffe0d000,
10810 0xfff0d0f0,
10811 0xfff0d0f0,
10812 0xfff0d0f0,
10813 0xffe0d000,
10814 0xf800d000};
14120108
JS
10815
10816 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10817 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10818 found = true;
10819 break;
10820 }
10821 }
10822 if (!found) {
10823 goto illegal_op;
10824 }
10825 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10826 ARCH(6T2);
10827 }
10828
10829 rn = (insn >> 16) & 0xf;
10830 rs = (insn >> 12) & 0xf;
10831 rd = (insn >> 8) & 0xf;
10832 rm = insn & 0xf;
10833 switch ((insn >> 25) & 0xf) {
10834 case 0: case 1: case 2: case 3:
10835 /* 16-bit instructions. Should never happen. */
10836 abort();
10837 case 4:
10838 if (insn & (1 << 22)) {
ebfe27c5
PM
10839 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10840              * - load/store doubleword, load/store exclusive, load-acquire/store-release,
5158de24 10841 * table branch, TT.
ebfe27c5 10842 */
76eff04d
PM
10843 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10844 arm_dc_feature(s, ARM_FEATURE_V8)) {
10845             /* 0b1110_1001_0111_1111_1110_1001_0111_1111
10846 * - SG (v8M only)
10847 * The bulk of the behaviour for this instruction is implemented
10848 * in v7m_handle_execute_nsc(), which deals with the insn when
10849 * it is executed by a CPU in non-secure state from memory
10850 * which is Secure & NonSecure-Callable.
10851 * Here we only need to handle the remaining cases:
10852 * * in NS memory (including the "security extension not
10853 * implemented" case) : NOP
10854 * * in S memory but CPU already secure (clear IT bits)
10855 * We know that the attribute for the memory this insn is
10856 * in must match the current CPU state, because otherwise
10857 * get_phys_addr_pmsav8 would have generated an exception.
10858 */
10859 if (s->v8m_secure) {
10860 /* Like the IT insn, we don't need to generate any code */
10861 s->condexec_cond = 0;
10862 s->condexec_mask = 0;
10863 }
10864 } else if (insn & 0x01200000) {
10865 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10866 * - load/store dual (post-indexed)
10867 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10868 * - load/store dual (literal and immediate)
10869 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10870 * - load/store dual (pre-indexed)
10871 */
10872 bool wback = extract32(insn, 21, 1);
10873
9ee6e8bb 10874 if (rn == 15) {
10875 if (insn & (1 << 21)) {
10876 /* UNPREDICTABLE */
10877 goto illegal_op;
10878 }
7d1b0095 10879 addr = tcg_temp_new_i32();
b0109805 10880 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10881 } else {
b0109805 10882 addr = load_reg(s, rn);
10883 }
10884 offset = (insn & 0xff) * 4;
910d7692 10885 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 10886 offset = -offset;
10887 }
10888
10889 if (s->v8m_stackcheck && rn == 13 && wback) {
10890 /*
10891 * Here 'addr' is the current SP; if offset is +ve we're
10892 * moving SP up, else down. It is UNKNOWN whether the limit
10893 * check triggers when SP starts below the limit and ends
10894 * up above it; check whichever of the current and final
10895 * SP is lower, so QEMU will trigger in that situation.
10896 */
10897 if ((int32_t)offset < 0) {
10898 TCGv_i32 newsp = tcg_temp_new_i32();
10899
10900 tcg_gen_addi_i32(newsp, addr, offset);
10901 gen_helper_v8m_stackcheck(cpu_env, newsp);
10902 tcg_temp_free_i32(newsp);
10903 } else {
10904 gen_helper_v8m_stackcheck(cpu_env, addr);
10905 }
10906 }
10907
9ee6e8bb 10908 if (insn & (1 << 24)) {
b0109805 10909 tcg_gen_addi_i32(addr, addr, offset);
10910 offset = 0;
10911 }
10912 if (insn & (1 << 20)) {
10913 /* ldrd */
e2592fad 10914 tmp = tcg_temp_new_i32();
12dcc321 10915 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
10916 store_reg(s, rs, tmp);
10917 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10918 tmp = tcg_temp_new_i32();
12dcc321 10919 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10920 store_reg(s, rd, tmp);
10921 } else {
10922 /* strd */
b0109805 10923 tmp = load_reg(s, rs);
12dcc321 10924 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10925 tcg_temp_free_i32(tmp);
10926 tcg_gen_addi_i32(addr, addr, 4);
10927 tmp = load_reg(s, rd);
12dcc321 10928 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10929 tcg_temp_free_i32(tmp);
9ee6e8bb 10930 }
910d7692 10931 if (wback) {
9ee6e8bb 10932 /* Base writeback. */
10933 tcg_gen_addi_i32(addr, addr, offset - 4);
10934 store_reg(s, rn, addr);
10935 } else {
7d1b0095 10936 tcg_temp_free_i32(addr);
10937 }
10938 } else if ((insn & (1 << 23)) == 0) {
10939 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
10940 * - load/store exclusive word
5158de24 10941 * - TT (v8M only)
10942 */
10943 if (rs == 15) {
10944 if (!(insn & (1 << 20)) &&
10945 arm_dc_feature(s, ARM_FEATURE_M) &&
10946 arm_dc_feature(s, ARM_FEATURE_V8)) {
10947 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
10948 * - TT (v8M only)
10949 */
10950 bool alt = insn & (1 << 7);
10951 TCGv_i32 addr, op, ttresp;
10952
10953 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
10954 /* we UNDEF for these UNPREDICTABLE cases */
10955 goto illegal_op;
10956 }
10957
10958 if (alt && !s->v8m_secure) {
10959 goto illegal_op;
10960 }
10961
10962 addr = load_reg(s, rn);
10963 op = tcg_const_i32(extract32(insn, 6, 2));
10964 ttresp = tcg_temp_new_i32();
10965 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
10966 tcg_temp_free_i32(addr);
10967 tcg_temp_free_i32(op);
10968 store_reg(s, rd, ttresp);
384c6c03 10969 break;
5158de24 10970 }
10971 goto illegal_op;
10972 }
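/*
 * Note: a *local* TCG temp is used for the address because the
 * exclusive-access helpers below emit conditional branches, which
 * invalidate ordinary temporaries.
 */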
39d5492a 10973 addr = tcg_temp_local_new_i32();
98a46317 10974 load_reg_var(s, addr, rn);
426f5abc 10975 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 10976 if (insn & (1 << 20)) {
426f5abc 10977 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 10978 } else {
426f5abc 10979 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 10980 }
39d5492a 10981 tcg_temp_free_i32(addr);
2359bf80 10982 } else if ((insn & (7 << 5)) == 0) {
10983 /* Table Branch. */
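/*
 * TBB/TBH: insn bit 4 selects a halfword (rather than byte) table, so
 * the index Rm is added twice; the entry is an unsigned halfword count
 * and the branch target is PC + 2 * entry.
 */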
10984 if (rn == 15) {
7d1b0095 10985 addr = tcg_temp_new_i32();
b0109805 10986 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 10987 } else {
b0109805 10988 addr = load_reg(s, rn);
9ee6e8bb 10989 }
b26eefb6 10990 tmp = load_reg(s, rm);
b0109805 10991 tcg_gen_add_i32(addr, addr, tmp);
10992 if (insn & (1 << 4)) {
10993 /* tbh */
b0109805 10994 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10995 tcg_temp_free_i32(tmp);
e2592fad 10996 tmp = tcg_temp_new_i32();
12dcc321 10997 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10998 } else { /* tbb */
7d1b0095 10999 tcg_temp_free_i32(tmp);
e2592fad 11000 tmp = tcg_temp_new_i32();
12dcc321 11001 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11002 }
7d1b0095 11003 tcg_temp_free_i32(addr);
11004 tcg_gen_shli_i32(tmp, tmp, 1);
11005 tcg_gen_addi_i32(tmp, tmp, s->pc);
11006 store_reg(s, 15, tmp);
9ee6e8bb 11007 } else {
11008 bool is_lasr = false;
11009 bool is_ld = extract32(insn, 20, 1);
2359bf80 11010 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 11011 op = (insn >> 4) & 0x3;
11012 switch (op2) {
11013 case 0:
426f5abc 11014 goto illegal_op;
11015 case 1:
11016 /* Load/store exclusive byte/halfword/doubleword */
11017 if (op == 2) {
11018 goto illegal_op;
11019 }
11020 ARCH(7);
11021 break;
11022 case 2:
11023 /* Load-acquire/store-release */
11024 if (op == 3) {
11025 goto illegal_op;
11026 }
11027 /* Fall through */
11028 case 3:
11029 /* Load-acquire/store-release exclusive */
11030 ARCH(8);
96c55295 11031 is_lasr = true;
2359bf80 11032 break;
426f5abc 11033 }
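/*
 * For the load-acquire/store-release forms we emit explicit fences:
 * a release barrier before the store and an acquire barrier after the
 * load, giving the architecturally required ordering.
 */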
11034
11035 if (is_lasr && !is_ld) {
11036 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
11037 }
11038
39d5492a 11039 addr = tcg_temp_local_new_i32();
98a46317 11040 load_reg_var(s, addr, rn);
2359bf80 11041 if (!(op2 & 1)) {
96c55295 11042 if (is_ld) {
11043 tmp = tcg_temp_new_i32();
11044 switch (op) {
11045 case 0: /* ldab */
11046 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
11047 rs | ISSIsAcqRel);
11048 break;
11049 case 1: /* ldah */
11050 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
11051 rs | ISSIsAcqRel);
11052 break;
11053 case 2: /* lda */
11054 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11055 rs | ISSIsAcqRel);
11056 break;
11057 default:
11058 abort();
11059 }
11060 store_reg(s, rs, tmp);
11061 } else {
11062 tmp = load_reg(s, rs);
11063 switch (op) {
11064 case 0: /* stlb */
11065 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
11066 rs | ISSIsAcqRel);
11067 break;
11068 case 1: /* stlh */
11069 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
11070 rs | ISSIsAcqRel);
11071 break;
11072 case 2: /* stl */
11073 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
11074 rs | ISSIsAcqRel);
11075 break;
11076 default:
11077 abort();
11078 }
11079 tcg_temp_free_i32(tmp);
11080 }
96c55295 11081 } else if (is_ld) {
426f5abc 11082 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 11083 } else {
426f5abc 11084 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 11085 }
39d5492a 11086 tcg_temp_free_i32(addr);
11087
11088 if (is_lasr && is_ld) {
11089 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
11090 }
11091 }
11092 } else {
11093 /* Load/store multiple, RFE, SRS. */
11094 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 11095 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 11096 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11097 goto illegal_op;
00115976 11098 }
11099 if (insn & (1 << 20)) {
11100 /* rfe */
11101 addr = load_reg(s, rn);
11102 if ((insn & (1 << 24)) == 0)
11103 tcg_gen_addi_i32(addr, addr, -8);
11104 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 11105 tmp = tcg_temp_new_i32();
12dcc321 11106 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11107 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 11108 tmp2 = tcg_temp_new_i32();
12dcc321 11109 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
11110 if (insn & (1 << 21)) {
11111 /* Base writeback. */
11112 if (insn & (1 << 24)) {
11113 tcg_gen_addi_i32(addr, addr, 4);
11114 } else {
11115 tcg_gen_addi_i32(addr, addr, -4);
11116 }
11117 store_reg(s, rn, addr);
11118 } else {
7d1b0095 11119 tcg_temp_free_i32(addr);
9ee6e8bb 11120 }
b0109805 11121 gen_rfe(s, tmp, tmp2);
11122 } else {
11123 /* srs */
11124 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
11125 insn & (1 << 21));
11126 }
11127 } else {
5856d44e 11128 int i, loaded_base = 0;
39d5492a 11129 TCGv_i32 loaded_var;
7c0ed88e 11130 bool wback = extract32(insn, 21, 1);
9ee6e8bb 11131 /* Load/store multiple. */
b0109805 11132 addr = load_reg(s, rn);
11133 offset = 0;
11134 for (i = 0; i < 16; i++) {
11135 if (insn & (1 << i))
11136 offset += 4;
11137 }
7c0ed88e 11138
9ee6e8bb 11139 if (insn & (1 << 24)) {
b0109805 11140 tcg_gen_addi_i32(addr, addr, -offset);
11141 }
11142
11143 if (s->v8m_stackcheck && rn == 13 && wback) {
11144 /*
11145 * If the writeback is incrementing SP rather than
11146 * decrementing it, and the initial SP is below the
11147 * stack limit but the final written-back SP would
11148 * be above, then we must not perform any memory
11149 * accesses, but it is IMPDEF whether we generate
11150 * an exception. We choose to do so in this case.
11151 * At this point 'addr' is the lowest address, so
11152 * either the original SP (if incrementing) or our
11153 * final SP (if decrementing), so that's what we check.
11154 */
11155 gen_helper_v8m_stackcheck(cpu_env, addr);
11156 }
11157
f764718d 11158 loaded_var = NULL;
11159 for (i = 0; i < 16; i++) {
11160 if ((insn & (1 << i)) == 0)
11161 continue;
11162 if (insn & (1 << 20)) {
11163 /* Load. */
e2592fad 11164 tmp = tcg_temp_new_i32();
12dcc321 11165 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 11166 if (i == 15) {
3bb8a96f 11167 gen_bx_excret(s, tmp);
11168 } else if (i == rn) {
11169 loaded_var = tmp;
11170 loaded_base = 1;
9ee6e8bb 11171 } else {
b0109805 11172 store_reg(s, i, tmp);
11173 }
11174 } else {
11175 /* Store. */
b0109805 11176 tmp = load_reg(s, i);
12dcc321 11177 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 11178 tcg_temp_free_i32(tmp);
9ee6e8bb 11179 }
b0109805 11180 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 11181 }
11182 if (loaded_base) {
11183 store_reg(s, rn, loaded_var);
11184 }
7c0ed88e 11185 if (wback) {
11186 /* Base register writeback. */
11187 if (insn & (1 << 24)) {
b0109805 11188 tcg_gen_addi_i32(addr, addr, -offset);
11189 }
11190 /* Fault if writeback register is in register list. */
11191 if (insn & (1 << rn))
11192 goto illegal_op;
11193 store_reg(s, rn, addr);
11194 } else {
7d1b0095 11195 tcg_temp_free_i32(addr);
11196 }
11197 }
11198 }
11199 break;
11200 case 5:
11201
9ee6e8bb 11202 op = (insn >> 21) & 0xf;
2af9ab77 11203 if (op == 6) {
11204 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11205 goto illegal_op;
11206 }
11207 /* Halfword pack. */
11208 tmp = load_reg(s, rn);
11209 tmp2 = load_reg(s, rm);
11210 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
11211 if (insn & (1 << 5)) {
11212 /* pkhtb */
11213 if (shift == 0)
11214 shift = 31;
11215 tcg_gen_sari_i32(tmp2, tmp2, shift);
11216 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
11217 tcg_gen_ext16u_i32(tmp2, tmp2);
11218 } else {
11219 /* pkhbt */
11220 if (shift)
11221 tcg_gen_shli_i32(tmp2, tmp2, shift);
11222 tcg_gen_ext16u_i32(tmp, tmp);
11223 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
11224 }
11225 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 11226 tcg_temp_free_i32(tmp2);
11227 store_reg(s, rd, tmp);
11228 } else {
11229 /* Data processing register constant shift. */
11230 if (rn == 15) {
7d1b0095 11231 tmp = tcg_temp_new_i32();
11232 tcg_gen_movi_i32(tmp, 0);
11233 } else {
11234 tmp = load_reg(s, rn);
11235 }
11236 tmp2 = load_reg(s, rm);
11237
11238 shiftop = (insn >> 4) & 3;
11239 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
11240 conds = (insn & (1 << 20)) != 0;
11241 logic_cc = (conds && thumb2_logic_op(op));
11242 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
11243 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
11244 goto illegal_op;
7d1b0095 11245 tcg_temp_free_i32(tmp2);
11246 if (rd == 13 &&
11247 ((op == 2 && rn == 15) ||
11248 (op == 8 && rn == 13) ||
11249 (op == 13 && rn == 13))) {
11250 /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
11251 store_sp_checked(s, tmp);
11252 } else if (rd != 15) {
11253 store_reg(s, rd, tmp);
11254 } else {
7d1b0095 11255 tcg_temp_free_i32(tmp);
2af9ab77 11256 }
3174f8e9 11257 }
11258 break;
11259 case 13: /* Misc data processing. */
11260 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
11261 if (op < 4 && (insn & 0xf000) != 0xf000)
11262 goto illegal_op;
11263 switch (op) {
11264 case 0: /* Register controlled shift. */
11265 tmp = load_reg(s, rn);
11266 tmp2 = load_reg(s, rm);
11267 if ((insn & 0x70) != 0)
11268 goto illegal_op;
11269 /*
11270 * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
11271 * - MOV, MOVS (register-shifted register), flagsetting
11272 */
9ee6e8bb 11273 op = (insn >> 21) & 3;
11274 logic_cc = (insn & (1 << 20)) != 0;
11275 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
11276 if (logic_cc)
11277 gen_logic_CC(tmp);
bedb8a6b 11278 store_reg(s, rd, tmp);
11279 break;
11280 case 1: /* Sign/zero extend. */
11281 op = (insn >> 20) & 7;
11282 switch (op) {
11283 case 0: /* SXTAH, SXTH */
11284 case 1: /* UXTAH, UXTH */
11285 case 4: /* SXTAB, SXTB */
11286 case 5: /* UXTAB, UXTB */
11287 break;
11288 case 2: /* SXTAB16, SXTB16 */
11289 case 3: /* UXTAB16, UXTB16 */
11290 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11291 goto illegal_op;
11292 }
11293 break;
11294 default:
11295 goto illegal_op;
11296 }
11297 if (rn != 15) {
11298 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11299 goto illegal_op;
11300 }
11301 }
5e3f878a 11302 tmp = load_reg(s, rm);
9ee6e8bb 11303 shift = (insn >> 4) & 3;
1301f322 11304 /* ??? In many cases it's not necessary to do a
11305 rotate, a shift is sufficient. */
11306 if (shift != 0)
f669df27 11307 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
11308 op = (insn >> 20) & 7;
11309 switch (op) {
11310 case 0: gen_sxth(tmp); break;
11311 case 1: gen_uxth(tmp); break;
11312 case 2: gen_sxtb16(tmp); break;
11313 case 3: gen_uxtb16(tmp); break;
11314 case 4: gen_sxtb(tmp); break;
11315 case 5: gen_uxtb(tmp); break;
11316 default:
11317 g_assert_not_reached();
11318 }
11319 if (rn != 15) {
5e3f878a 11320 tmp2 = load_reg(s, rn);
9ee6e8bb 11321 if ((op >> 1) == 1) {
5e3f878a 11322 gen_add16(tmp, tmp2);
9ee6e8bb 11323 } else {
5e3f878a 11324 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11325 tcg_temp_free_i32(tmp2);
11326 }
11327 }
5e3f878a 11328 store_reg(s, rd, tmp);
11329 break;
11330 case 2: /* SIMD add/subtract. */
11331 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11332 goto illegal_op;
11333 }
11334 op = (insn >> 20) & 7;
11335 shift = (insn >> 4) & 7;
11336 if ((op & 3) == 3 || (shift & 3) == 3)
11337 goto illegal_op;
11338 tmp = load_reg(s, rn);
11339 tmp2 = load_reg(s, rm);
11340 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 11341 tcg_temp_free_i32(tmp2);
6ddbc6e4 11342 store_reg(s, rd, tmp);
11343 break;
11344 case 3: /* Other data processing. */
11345 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
11346 if (op < 4) {
11347 /* Saturating add/subtract. */
11348 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11349 goto illegal_op;
11350 }
11351 tmp = load_reg(s, rn);
11352 tmp2 = load_reg(s, rm);
9ee6e8bb 11353 if (op & 1)
9ef39277 11354 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 11355 if (op & 2)
9ef39277 11356 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 11357 else
9ef39277 11358 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 11359 tcg_temp_free_i32(tmp2);
9ee6e8bb 11360 } else {
11361 switch (op) {
11362 case 0x0a: /* rbit */
11363 case 0x08: /* rev */
11364 case 0x09: /* rev16 */
11365 case 0x0b: /* revsh */
11366 case 0x18: /* clz */
11367 break;
11368 case 0x10: /* sel */
11369 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11370 goto illegal_op;
11371 }
11372 break;
11373 case 0x20: /* crc32/crc32c */
11374 case 0x21:
11375 case 0x22:
11376 case 0x28:
11377 case 0x29:
11378 case 0x2a:
962fcbf2 11379 if (!dc_isar_feature(aa32_crc32, s)) {
11380 goto illegal_op;
11381 }
11382 break;
11383 default:
11384 goto illegal_op;
11385 }
d9ba4830 11386 tmp = load_reg(s, rn);
11387 switch (op) {
11388 case 0x0a: /* rbit */
d9ba4830 11389 gen_helper_rbit(tmp, tmp);
11390 break;
11391 case 0x08: /* rev */
66896cb8 11392 tcg_gen_bswap32_i32(tmp, tmp);
11393 break;
11394 case 0x09: /* rev16 */
d9ba4830 11395 gen_rev16(tmp);
11396 break;
11397 case 0x0b: /* revsh */
d9ba4830 11398 gen_revsh(tmp);
11399 break;
11400 case 0x10: /* sel */
d9ba4830 11401 tmp2 = load_reg(s, rm);
7d1b0095 11402 tmp3 = tcg_temp_new_i32();
0ecb72a5 11403 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 11404 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
11405 tcg_temp_free_i32(tmp3);
11406 tcg_temp_free_i32(tmp2);
11407 break;
11408 case 0x18: /* clz */
7539a012 11409 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 11410 break;
11411 case 0x20:
11412 case 0x21:
11413 case 0x22:
11414 case 0x28:
11415 case 0x29:
11416 case 0x2a:
11417 {
11418 /* crc32/crc32c */
11419 uint32_t sz = op & 0x3;
11420 uint32_t c = op & 0x8;
11421
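/*
 * The crc32 helpers consume 1 << sz bytes of the value, so the
 * operand is masked down first for the byte and halfword variants.
 */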
eb0ecd5a 11422 tmp2 = load_reg(s, rm);
11423 if (sz == 0) {
11424 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
11425 } else if (sz == 1) {
11426 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
11427 }
11428 tmp3 = tcg_const_i32(1 << sz);
11429 if (c) {
11430 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
11431 } else {
11432 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
11433 }
11434 tcg_temp_free_i32(tmp2);
11435 tcg_temp_free_i32(tmp3);
11436 break;
11437 }
9ee6e8bb 11438 default:
62b44f05 11439 g_assert_not_reached();
11440 }
11441 }
d9ba4830 11442 store_reg(s, rd, tmp);
11443 break;
11444 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
11445 switch ((insn >> 20) & 7) {
11446 case 0: /* 32 x 32 -> 32 */
11447 case 7: /* Unsigned sum of absolute differences. */
11448 break;
11449 case 1: /* 16 x 16 -> 32 */
11450 case 2: /* Dual multiply add. */
11451 case 3: /* 32 * 16 -> 32msb */
11452 case 4: /* Dual multiply subtract. */
11453 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11454 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11455 goto illegal_op;
11456 }
11457 break;
11458 }
9ee6e8bb 11459 op = (insn >> 4) & 0xf;
11460 tmp = load_reg(s, rn);
11461 tmp2 = load_reg(s, rm);
11462 switch ((insn >> 20) & 7) {
11463 case 0: /* 32 x 32 -> 32 */
d9ba4830 11464 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 11465 tcg_temp_free_i32(tmp2);
9ee6e8bb 11466 if (rs != 15) {
d9ba4830 11467 tmp2 = load_reg(s, rs);
9ee6e8bb 11468 if (op)
d9ba4830 11469 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 11470 else
d9ba4830 11471 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11472 tcg_temp_free_i32(tmp2);
9ee6e8bb 11473 }
11474 break;
11475 case 1: /* 16 x 16 -> 32 */
d9ba4830 11476 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11477 tcg_temp_free_i32(tmp2);
9ee6e8bb 11478 if (rs != 15) {
d9ba4830 11479 tmp2 = load_reg(s, rs);
9ef39277 11480 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11481 tcg_temp_free_i32(tmp2);
9ee6e8bb 11482 }
11483 break;
11484 case 2: /* Dual multiply add. */
11485 case 4: /* Dual multiply subtract. */
11486 if (op)
11487 gen_swap_half(tmp2);
11488 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11489 if (insn & (1 << 22)) {
e1d177b9 11490 /* This subtraction cannot overflow. */
d9ba4830 11491 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11492 } else {
11493 /* This addition cannot overflow 32 bits;
11494 * however it may overflow considered as a signed
11495 * operation, in which case we must set the Q flag.
11496 */
9ef39277 11497 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 11498 }
7d1b0095 11499 tcg_temp_free_i32(tmp2);
11500 if (rs != 15)
11501 {
d9ba4830 11502 tmp2 = load_reg(s, rs);
9ef39277 11503 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11504 tcg_temp_free_i32(tmp2);
9ee6e8bb 11505 }
11506 break;
11507 case 3: /* 32 * 16 -> 32msb */
11508 if (op)
d9ba4830 11509 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 11510 else
d9ba4830 11511 gen_sxth(tmp2);
11512 tmp64 = gen_muls_i64_i32(tmp, tmp2);
11513 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 11514 tmp = tcg_temp_new_i32();
ecc7b3aa 11515 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 11516 tcg_temp_free_i64(tmp64);
11517 if (rs != 15)
11518 {
d9ba4830 11519 tmp2 = load_reg(s, rs);
9ef39277 11520 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 11521 tcg_temp_free_i32(tmp2);
9ee6e8bb 11522 }
9ee6e8bb 11523 break;
11524 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
11525 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11526 if (rs != 15) {
11527 tmp = load_reg(s, rs);
11528 if (insn & (1 << 20)) {
11529 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 11530 } else {
838fa72d 11531 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 11532 }
2c0262af 11533 }
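/*
 * Insn bit 4 is the 'R' (round) flag of SMMUL/SMMLA/SMMLS: adding
 * 0x80000000 before taking the high half rounds the result to
 * nearest instead of truncating.
 */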
11534 if (insn & (1 << 4)) {
11535 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
11536 }
11537 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 11538 tmp = tcg_temp_new_i32();
ecc7b3aa 11539 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 11540 tcg_temp_free_i64(tmp64);
11541 break;
11542 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 11543 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 11544 tcg_temp_free_i32(tmp2);
9ee6e8bb 11545 if (rs != 15) {
11546 tmp2 = load_reg(s, rs);
11547 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11548 tcg_temp_free_i32(tmp2);
5fd46862 11549 }
9ee6e8bb 11550 break;
2c0262af 11551 }
d9ba4830 11552 store_reg(s, rd, tmp);
2c0262af 11553 break;
11554 case 6: case 7: /* 64-bit multiply, Divide. */
11555 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
11556 tmp = load_reg(s, rn);
11557 tmp2 = load_reg(s, rm);
11558 if ((op & 0x50) == 0x10) {
11559 /* sdiv, udiv */
7e0cf8b4 11560 if (!dc_isar_feature(thumb_div, s)) {
9ee6e8bb 11561 goto illegal_op;
47789990 11562 }
9ee6e8bb 11563 if (op & 0x20)
5e3f878a 11564 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 11565 else
5e3f878a 11566 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 11567 tcg_temp_free_i32(tmp2);
5e3f878a 11568 store_reg(s, rd, tmp);
11569 } else if ((op & 0xe) == 0xc) {
11570 /* Dual multiply accumulate long. */
11571 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11572 tcg_temp_free_i32(tmp);
11573 tcg_temp_free_i32(tmp2);
11574 goto illegal_op;
11575 }
9ee6e8bb 11576 if (op & 1)
11577 gen_swap_half(tmp2);
11578 gen_smul_dual(tmp, tmp2);
9ee6e8bb 11579 if (op & 0x10) {
5e3f878a 11580 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 11581 } else {
5e3f878a 11582 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 11583 }
7d1b0095 11584 tcg_temp_free_i32(tmp2);
11585 /* BUGFIX */
11586 tmp64 = tcg_temp_new_i64();
11587 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11588 tcg_temp_free_i32(tmp);
11589 gen_addq(s, tmp64, rs, rd);
11590 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11591 tcg_temp_free_i64(tmp64);
2c0262af 11592 } else {
11593 if (op & 0x20) {
11594 /* Unsigned 64-bit multiply */
a7812ae4 11595 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 11596 } else {
11597 if (op & 8) {
11598 /* smlalxy */
11599 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11600 tcg_temp_free_i32(tmp2);
11601 tcg_temp_free_i32(tmp);
11602 goto illegal_op;
11603 }
5e3f878a 11604 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 11605 tcg_temp_free_i32(tmp2);
11606 tmp64 = tcg_temp_new_i64();
11607 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 11608 tcg_temp_free_i32(tmp);
11609 } else {
11610 /* Signed 64-bit multiply */
a7812ae4 11611 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 11612 }
b5ff1b31 11613 }
11614 if (op & 4) {
11615 /* umaal */
11616 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
11617 tcg_temp_free_i64(tmp64);
11618 goto illegal_op;
11619 }
11620 gen_addq_lo(s, tmp64, rs);
11621 gen_addq_lo(s, tmp64, rd);
11622 } else if (op & 0x40) {
11623 /* 64-bit accumulate. */
a7812ae4 11624 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 11625 }
a7812ae4 11626 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 11627 tcg_temp_free_i64(tmp64);
5fd46862 11628 }
2c0262af 11629 break;
11630 }
11631 break;
11632 case 6: case 7: case 14: case 15:
11633 /* Coprocessor. */
11634 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11635 /* We don't currently implement M profile FP support,
11636 * so this entire space should give a NOCP fault, with
11637 * the exception of the v8M VLLDM and VLSTM insns, which
11638 * must be NOPs in Secure state and UNDEF in Nonsecure state.
7517748e 11639 */
11640 if (arm_dc_feature(s, ARM_FEATURE_V8) &&
11641 (insn & 0xffa00f00) == 0xec200a00) {
11642 /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
11643 * - VLLDM, VLSTM
11644 * We choose to UNDEF if the RAZ bits are non-zero.
11645 */
11646 if (!s->v8m_secure || (insn & 0x0040f0ff)) {
11647 goto illegal_op;
11648 }
11649 /* Just NOP since FP support is not implemented */
11650 break;
11651 }
11652 /* All other insns: NOCP */
11653 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
11654 default_exception_el(s));
11655 break;
11656 }
11657 if ((insn & 0xfe000a00) == 0xfc000800
11658 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11659 /* The Thumb2 and ARM encodings are identical. */
11660 if (disas_neon_insn_3same_ext(s, insn)) {
11661 goto illegal_op;
11662 }
11663 } else if ((insn & 0xff000a00) == 0xfe000800
11664 && arm_dc_feature(s, ARM_FEATURE_V8)) {
11665 /* The Thumb2 and ARM encodings are identical. */
11666 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
11667 goto illegal_op;
11668 }
11669 } else if (((insn >> 24) & 3) == 3) {
9ee6e8bb 11670 /* Translate into the equivalent ARM encoding. */
f06053e3 11671 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 11672 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 11673 goto illegal_op;
7dcc1f89 11674 }
6a57f3eb 11675 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 11676 if (disas_vfp_insn(s, insn)) {
11677 goto illegal_op;
11678 }
11679 } else {
11680 if (insn & (1 << 28))
11681 goto illegal_op;
7dcc1f89 11682 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 11683 goto illegal_op;
7dcc1f89 11684 }
11685 }
11686 break;
11687 case 8: case 9: case 10: case 11:
11688 if (insn & (1 << 15)) {
11689 /* Branches, misc control. */
11690 if (insn & 0x5000) {
11691 /* Unconditional branch. */
11692 /* signextend(hw1[10:0]) -> offset[:12]. */
11693 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
11694 /* hw1[10:0] -> offset[11:1]. */
11695 offset |= (insn & 0x7ff) << 1;
11696 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
11697 offset[24:22] already have the same value because of the
11698 sign extension above. */
11699 offset ^= ((~insn) & (1 << 13)) << 10;
11700 offset ^= ((~insn) & (1 << 11)) << 11;
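/*
 * In ARM ARM terms this computes SignExtend(S:I1:I2:imm10:imm11:0),
 * where I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S) and J1/J2 are hw2
 * bits 13 and 11.
 */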
11701
11702 if (insn & (1 << 14)) {
11703 /* Branch and link. */
3174f8e9 11704 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 11705 }
3b46e624 11706
b0109805 11707 offset += s->pc;
11708 if (insn & (1 << 12)) {
11709 /* b/bl */
b0109805 11710 gen_jmp(s, offset);
11711 } else {
11712 /* blx */
b0109805 11713 offset &= ~(uint32_t)2;
be5e7a76 11714 /* thumb2 bx, no need to check */
b0109805 11715 gen_bx_im(s, offset);
2c0262af 11716 }
11717 } else if (((insn >> 23) & 7) == 7) {
11718 /* Misc control */
11719 if (insn & (1 << 13))
11720 goto illegal_op;
11721
11722 if (insn & (1 << 26)) {
11723 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11724 goto illegal_op;
11725 }
11726 if (!(insn & (1 << 20))) {
11727 /* Hypervisor call (v7) */
11728 int imm16 = extract32(insn, 16, 4) << 12
11729 | extract32(insn, 0, 12);
11730 ARCH(7);
11731 if (IS_USER(s)) {
11732 goto illegal_op;
11733 }
11734 gen_hvc(s, imm16);
11735 } else {
11736 /* Secure monitor call (v6+) */
11737 ARCH(6K);
11738 if (IS_USER(s)) {
11739 goto illegal_op;
11740 }
11741 gen_smc(s);
11742 }
2c0262af 11743 } else {
11744 op = (insn >> 20) & 7;
11745 switch (op) {
11746 case 0: /* msr cpsr. */
b53d8923 11747 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 11748 tmp = load_reg(s, rn);
11749 /* the constant is the mask and SYSm fields */
11750 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 11751 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 11752 tcg_temp_free_i32(addr);
7d1b0095 11753 tcg_temp_free_i32(tmp);
11754 gen_lookup_tb(s);
11755 break;
11756 }
11757 /* fall through */
11758 case 1: /* msr spsr. */
b53d8923 11759 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11760 goto illegal_op;
b53d8923 11761 }
11762
11763 if (extract32(insn, 5, 1)) {
11764 /* MSR (banked) */
11765 int sysm = extract32(insn, 8, 4) |
11766 (extract32(insn, 4, 1) << 4);
11767 int r = op & 1;
11768
11769 gen_msr_banked(s, r, sysm, rm);
11770 break;
11771 }
11772
11773 /* MSR (for PSRs) */
11774 tmp = load_reg(s, rn);
11775 if (gen_set_psr(s,
7dcc1f89 11776 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 11777 op == 1, tmp))
11778 goto illegal_op;
11779 break;
11780 case 2: /* cps, nop-hint. */
11781 if (((insn >> 8) & 7) == 0) {
11782 gen_nop_hint(s, insn & 0xff);
11783 }
11784 /* Implemented as NOP in user mode. */
11785 if (IS_USER(s))
11786 break;
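/*
 * CPS: 'offset' collects the mask of CPSR bits to change and 'imm'
 * their new values; bit 9 (imod) selects disabling (setting A/I/F)
 * rather than enabling, and bit 8 adds a mode change to the mode
 * in insn[4:0].
 */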
11787 offset = 0;
11788 imm = 0;
11789 if (insn & (1 << 10)) {
11790 if (insn & (1 << 7))
11791 offset |= CPSR_A;
11792 if (insn & (1 << 6))
11793 offset |= CPSR_I;
11794 if (insn & (1 << 5))
11795 offset |= CPSR_F;
11796 if (insn & (1 << 9))
11797 imm = CPSR_A | CPSR_I | CPSR_F;
11798 }
11799 if (insn & (1 << 8)) {
11800 offset |= 0x1f;
11801 imm |= (insn & 0x1f);
11802 }
11803 if (offset) {
2fbac54b 11804 gen_set_psr_im(s, offset, 0, imm);
11805 }
11806 break;
11807 case 3: /* Special control operations. */
14120108 11808 if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
8297cb13 11809 !arm_dc_feature(s, ARM_FEATURE_M)) {
11810 goto illegal_op;
11811 }
11812 op = (insn >> 4) & 0xf;
11813 switch (op) {
11814 case 2: /* clrex */
426f5abc 11815 gen_clrex(s);
11816 break;
11817 case 4: /* dsb */
11818 case 5: /* dmb */
61e4c432 11819 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 11820 break;
11821 case 6: /* isb */
11822 /* We need to break the TB after this insn
11823 * to execute self-modifying code correctly
11824 * and also to take any pending interrupts
11825 * immediately.
11826 */
0b609cc1 11827 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 11828 break;
11829 default:
11830 goto illegal_op;
11831 }
11832 break;
11833 case 4: /* bxj */
11834 /* Trivial implementation equivalent to bx.
11835 * This instruction doesn't exist at all for M-profile.
11836 */
11837 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11838 goto illegal_op;
11839 }
11840 tmp = load_reg(s, rn);
11841 gen_bx(s, tmp);
11842 break;
11843 case 5: /* Exception return. */
11844 if (IS_USER(s)) {
11845 goto illegal_op;
11846 }
11847 if (rn != 14 || rd != 15) {
11848 goto illegal_op;
11849 }
11850 if (s->current_el == 2) {
11851 /* ERET from Hyp uses ELR_Hyp, not LR */
11852 if (insn & 0xff) {
11853 goto illegal_op;
11854 }
11855 tmp = load_cpu_field(elr_el[2]);
11856 } else {
11857 tmp = load_reg(s, rn);
11858 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
11859 }
11860 gen_exception_return(s, tmp);
11861 break;
8bfd0550 11862 case 6: /* MRS */
11863 if (extract32(insn, 5, 1) &&
11864 !arm_dc_feature(s, ARM_FEATURE_M)) {
11865 /* MRS (banked) */
11866 int sysm = extract32(insn, 16, 4) |
11867 (extract32(insn, 4, 1) << 4);
11868
11869 gen_mrs_banked(s, 0, sysm, rd);
11870 break;
11871 }
11872
11873 if (extract32(insn, 16, 4) != 0xf) {
11874 goto illegal_op;
11875 }
11876 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
11877 extract32(insn, 0, 8) != 0) {
11878 goto illegal_op;
11879 }
11880
8bfd0550 11881 /* mrs cpsr */
7d1b0095 11882 tmp = tcg_temp_new_i32();
b53d8923 11883 if (arm_dc_feature(s, ARM_FEATURE_M)) {
11884 addr = tcg_const_i32(insn & 0xff);
11885 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 11886 tcg_temp_free_i32(addr);
9ee6e8bb 11887 } else {
9ef39277 11888 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 11889 }
8984bd2e 11890 store_reg(s, rd, tmp);
9ee6e8bb 11891 break;
8bfd0550 11892 case 7: /* MRS */
11893 if (extract32(insn, 5, 1) &&
11894 !arm_dc_feature(s, ARM_FEATURE_M)) {
11895 /* MRS (banked) */
11896 int sysm = extract32(insn, 16, 4) |
11897 (extract32(insn, 4, 1) << 4);
11898
11899 gen_mrs_banked(s, 1, sysm, rd);
11900 break;
11901 }
11902
11903 /* mrs spsr. */
9ee6e8bb 11904 /* Not accessible in user mode. */
b53d8923 11905 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 11906 goto illegal_op;
b53d8923 11907 }
11908
11909 if (extract32(insn, 16, 4) != 0xf ||
11910 extract32(insn, 0, 8) != 0) {
11911 goto illegal_op;
11912 }
11913
11914 tmp = load_cpu_field(spsr);
11915 store_reg(s, rd, tmp);
9ee6e8bb 11916 break;
11917 }
11918 }
11919 } else {
11920 /* Conditional branch. */
11921 op = (insn >> 22) & 0xf;
11922 /* Generate a conditional jump to next instruction. */
c2d9644e 11923 arm_skip_unless(s, op);
11924
11925 /* offset[11:1] = insn[10:0] */
11926 offset = (insn & 0x7ff) << 1;
11927 /* offset[17:12] = insn[21:16]. */
11928 offset |= (insn & 0x003f0000) >> 4;
11929 /* offset[31:20] = insn[26]. */
11930 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
11931 /* offset[18] = insn[13]. */
11932 offset |= (insn & (1 << 13)) << 5;
11933 /* offset[19] = insn[11]. */
11934 offset |= (insn & (1 << 11)) << 8;
11935
11936 /* jump to the offset */
b0109805 11937 gen_jmp(s, s->pc + offset);
11938 }
11939 } else {
11940 /*
11941 * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
11942 * - Data-processing (modified immediate, plain binary immediate)
11943 */
9ee6e8bb 11944 if (insn & (1 << 25)) {
11945 /*
11946 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
11947 * - Data-processing (plain binary immediate)
11948 */
11949 if (insn & (1 << 24)) {
11950 if (insn & (1 << 20))
11951 goto illegal_op;
11952 /* Bitfield/Saturate. */
11953 op = (insn >> 21) & 7;
11954 imm = insn & 0x1f;
11955 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 11956 if (rn == 15) {
7d1b0095 11957 tmp = tcg_temp_new_i32();
11958 tcg_gen_movi_i32(tmp, 0);
11959 } else {
11960 tmp = load_reg(s, rn);
11961 }
11962 switch (op) {
11963 case 2: /* Signed bitfield extract. */
11964 imm++;
11965 if (shift + imm > 32)
11966 goto illegal_op;
11967 if (imm < 32) {
11968 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
11969 }
11970 break;
11971 case 6: /* Unsigned bitfield extract. */
11972 imm++;
11973 if (shift + imm > 32)
11974 goto illegal_op;
11975 if (imm < 32) {
11976 tcg_gen_extract_i32(tmp, tmp, shift, imm);
11977 }
11978 break;
11979 case 3: /* Bitfield insert/clear. */
11980 if (imm < shift)
11981 goto illegal_op;
11982 imm = imm + 1 - shift;
11983 if (imm != 32) {
6ddbc6e4 11984 tmp2 = load_reg(s, rd);
d593c48e 11985 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 11986 tcg_temp_free_i32(tmp2);
11987 }
11988 break;
11989 case 7:
11990 goto illegal_op;
11991 default: /* Saturate. */
11992 if (shift) {
11993 if (op & 1)
6ddbc6e4 11994 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 11995 else
6ddbc6e4 11996 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 11997 }
6ddbc6e4 11998 tmp2 = tcg_const_i32(imm);
11999 if (op & 4) {
12000 /* Unsigned. */
12001 if ((op & 1) && shift == 0) {
12002 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12003 tcg_temp_free_i32(tmp);
12004 tcg_temp_free_i32(tmp2);
12005 goto illegal_op;
12006 }
9ef39277 12007 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12008 } else {
9ef39277 12009 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 12010 }
2c0262af 12011 } else {
9ee6e8bb 12012 /* Signed. */
12013 if ((op & 1) && shift == 0) {
12014 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
12015 tcg_temp_free_i32(tmp);
12016 tcg_temp_free_i32(tmp2);
12017 goto illegal_op;
12018 }
9ef39277 12019 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 12020 } else {
9ef39277 12021 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 12022 }
2c0262af 12023 }
b75263d6 12024 tcg_temp_free_i32(tmp2);
9ee6e8bb 12025 break;
2c0262af 12026 }
6ddbc6e4 12027 store_reg(s, rd, tmp);
12028 } else {
12029 imm = ((insn & 0x04000000) >> 15)
12030 | ((insn & 0x7000) >> 4) | (insn & 0xff);
12031 if (insn & (1 << 22)) {
12032 /* 16-bit immediate. */
12033 imm |= (insn >> 4) & 0xf000;
12034 if (insn & (1 << 23)) {
12035 /* movt */
5e3f878a 12036 tmp = load_reg(s, rd);
86831435 12037 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 12038 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 12039 } else {
9ee6e8bb 12040 /* movw */
7d1b0095 12041 tmp = tcg_temp_new_i32();
5e3f878a 12042 tcg_gen_movi_i32(tmp, imm);
2c0262af 12043 }
55203189 12044 store_reg(s, rd, tmp);
2c0262af 12045 } else {
12046 /* Add/sub 12-bit immediate. */
12047 if (rn == 15) {
b0109805 12048 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 12049 if (insn & (1 << 23))
b0109805 12050 offset -= imm;
9ee6e8bb 12051 else
b0109805 12052 offset += imm;
7d1b0095 12053 tmp = tcg_temp_new_i32();
5e3f878a 12054 tcg_gen_movi_i32(tmp, offset);
55203189 12055 store_reg(s, rd, tmp);
2c0262af 12056 } else {
5e3f878a 12057 tmp = load_reg(s, rn);
9ee6e8bb 12058 if (insn & (1 << 23))
5e3f878a 12059 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 12060 else
5e3f878a 12061 tcg_gen_addi_i32(tmp, tmp, imm);
12062 if (rn == 13 && rd == 13) {
12063 /* ADD SP, SP, imm or SUB SP, SP, imm */
12064 store_sp_checked(s, tmp);
12065 } else {
12066 store_reg(s, rd, tmp);
12067 }
2c0262af 12068 }
9ee6e8bb 12069 }
191abaa2 12070 }
9ee6e8bb 12071 } else {
12072 /*
12073 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
12074 * - Data-processing (modified immediate)
12075 */
12076 int shifter_out = 0;
12077 /* modified 12-bit immediate. */
12078 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
12079 imm = (insn & 0xff);
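/*
 * T32 modified immediate: 'shift' (i:imm3) values 0-3 replicate the
 * byte as 0x000000XY, 0x00XY00XY, 0xXY00XY00 or 0xXYXYXYXY; larger
 * values rotate an 8-bit constant whose bit 7 is forced to 1.
 */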
12080 switch (shift) {
12081 case 0: /* XY */
12082 /* Nothing to do. */
12083 break;
12084 case 1: /* 00XY00XY */
12085 imm |= imm << 16;
12086 break;
12087 case 2: /* XY00XY00 */
12088 imm |= imm << 16;
12089 imm <<= 8;
12090 break;
12091 case 3: /* XYXYXYXY */
12092 imm |= imm << 16;
12093 imm |= imm << 8;
12094 break;
12095 default: /* Rotated constant. */
12096 shift = (shift << 1) | (imm >> 7);
12097 imm |= 0x80;
12098 imm = imm << (32 - shift);
12099 shifter_out = 1;
12100 break;
b5ff1b31 12101 }
7d1b0095 12102 tmp2 = tcg_temp_new_i32();
3174f8e9 12103 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 12104 rn = (insn >> 16) & 0xf;
3174f8e9 12105 if (rn == 15) {
7d1b0095 12106 tmp = tcg_temp_new_i32();
12107 tcg_gen_movi_i32(tmp, 0);
12108 } else {
12109 tmp = load_reg(s, rn);
12110 }
12111 op = (insn >> 21) & 0xf;
12112 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 12113 shifter_out, tmp, tmp2))
9ee6e8bb 12114 goto illegal_op;
7d1b0095 12115 tcg_temp_free_i32(tmp2);
9ee6e8bb 12116 rd = (insn >> 8) & 0xf;
12117 if (rd == 13 && rn == 13
12118 && (op == 8 || op == 13)) {
12119 /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
12120 store_sp_checked(s, tmp);
12121 } else if (rd != 15) {
12122 store_reg(s, rd, tmp);
12123 } else {
7d1b0095 12124 tcg_temp_free_i32(tmp);
2c0262af 12125 }
2c0262af 12126 }
12127 }
12128 break;
12129 case 12: /* Load/store single data item. */
12130 {
12131 int postinc = 0;
12132 int writeback = 0;
a99caa48 12133 int memidx;
12134 ISSInfo issinfo;
12135
9ee6e8bb 12136 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 12137 if (disas_neon_ls_insn(s, insn)) {
c1713132 12138 goto illegal_op;
7dcc1f89 12139 }
12140 break;
12141 }
12142 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
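/*
 * After this, op[1:0] encodes the access size (0 = byte, 1 = halfword,
 * 2 = word) and op[2] marks a sign-extending load.
 */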
12143 if (rs == 15) {
12144 if (!(insn & (1 << 20))) {
12145 goto illegal_op;
12146 }
12147 if (op != 2) {
12148 /* Byte or halfword load space with dest == r15 : memory hints.
12149 * Catch them early so we don't emit pointless addressing code.
12150 * This space is a mix of:
12151 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
12152 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
12153 * cores)
12154 * unallocated hints, which must be treated as NOPs
12155 * UNPREDICTABLE space, which we NOP or UNDEF depending on
12156 * which is easiest for the decoding logic
12157 * Some space which must UNDEF
12158 */
12159 int op1 = (insn >> 23) & 3;
12160 int op2 = (insn >> 6) & 0x3f;
12161 if (op & 2) {
12162 goto illegal_op;
12163 }
12164 if (rn == 15) {
12165 /* UNPREDICTABLE, unallocated hint or
12166 * PLD/PLDW/PLI (literal)
12167 */
2eea841c 12168 return;
12169 }
12170 if (op1 & 1) {
2eea841c 12171 return; /* PLD/PLDW/PLI or unallocated hint */
12172 }
12173 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 12174 return; /* PLD/PLDW/PLI or unallocated hint */
12175 }
12176 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 12177 goto illegal_op;
12178 }
12179 }
a99caa48 12180 memidx = get_mem_index(s);
9ee6e8bb 12181 if (rn == 15) {
7d1b0095 12182 addr = tcg_temp_new_i32();
12183 /* PC relative. */
12184 /* s->pc has already been incremented by 4. */
12185 imm = s->pc & 0xfffffffc;
12186 if (insn & (1 << 23))
12187 imm += insn & 0xfff;
12188 else
12189 imm -= insn & 0xfff;
b0109805 12190 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 12191 } else {
b0109805 12192 addr = load_reg(s, rn);
12193 if (insn & (1 << 23)) {
12194 /* Positive offset. */
12195 imm = insn & 0xfff;
b0109805 12196 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 12197 } else {
9ee6e8bb 12198 imm = insn & 0xff;
12199 switch ((insn >> 8) & 0xf) {
12200 case 0x0: /* Shifted Register. */
9ee6e8bb 12201 shift = (insn >> 4) & 0xf;
12202 if (shift > 3) {
12203 tcg_temp_free_i32(addr);
18c9b560 12204 goto illegal_op;
2a0308c5 12205 }
b26eefb6 12206 tmp = load_reg(s, rm);
9ee6e8bb 12207 if (shift)
b26eefb6 12208 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 12209 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12210 tcg_temp_free_i32(tmp);
9ee6e8bb 12211 break;
2a0308c5 12212 case 0xc: /* Negative offset. */
b0109805 12213 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 12214 break;
2a0308c5 12215 case 0xe: /* User privilege. */
b0109805 12216 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 12217 memidx = get_a32_user_mem_index(s);
9ee6e8bb 12218 break;
2a0308c5 12219 case 0x9: /* Post-decrement. */
12220 imm = -imm;
12221 /* Fall through. */
2a0308c5 12222 case 0xb: /* Post-increment. */
12223 postinc = 1;
12224 writeback = 1;
12225 break;
2a0308c5 12226 case 0xd: /* Pre-decrement. */
12227 imm = -imm;
12228 /* Fall through. */
2a0308c5 12229 case 0xf: /* Pre-increment. */
12230 writeback = 1;
12231 break;
12232 default:
2a0308c5 12233 tcg_temp_free_i32(addr);
b7bcbe95 12234 goto illegal_op;
12235 }
12236 }
12237 }
12238
12239 issinfo = writeback ? ISSInvalid : rs;
12240
12241 if (s->v8m_stackcheck && rn == 13 && writeback) {
12242 /*
12243 * Stackcheck. Here we know 'addr' is the current SP;
12244 * if imm is +ve we're moving SP up, else down. It is
12245 * UNKNOWN whether the limit check triggers when SP starts
12246 * below the limit and ends up above it; we check the lower SP.
12247 */
12248 if ((int32_t)imm < 0) {
12249 TCGv_i32 newsp = tcg_temp_new_i32();
12250
12251 tcg_gen_addi_i32(newsp, addr, imm);
12252 gen_helper_v8m_stackcheck(cpu_env, newsp);
12253 tcg_temp_free_i32(newsp);
12254 } else {
12255 gen_helper_v8m_stackcheck(cpu_env, addr);
12256 }
12257 }
12258
12259 if (writeback && !postinc) {
12260 tcg_gen_addi_i32(addr, addr, imm);
12261 }
12262
12263 if (insn & (1 << 20)) {
12264 /* Load. */
5a839c0d 12265 tmp = tcg_temp_new_i32();
a2fdc890 12266 switch (op) {
5a839c0d 12267 case 0:
9bb6558a 12268 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
12269 break;
12270 case 4:
9bb6558a 12271 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
12272 break;
12273 case 1:
9bb6558a 12274 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
12275 break;
12276 case 5:
9bb6558a 12277 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
12278 break;
12279 case 2:
9bb6558a 12280 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12281 break;
2a0308c5 12282 default:
5a839c0d 12283 tcg_temp_free_i32(tmp);
12284 tcg_temp_free_i32(addr);
12285 goto illegal_op;
12286 }
12287 if (rs == 15) {
3bb8a96f 12288 gen_bx_excret(s, tmp);
9ee6e8bb 12289 } else {
a2fdc890 12290 store_reg(s, rs, tmp);
12291 }
12292 } else {
12293 /* Store. */
b0109805 12294 tmp = load_reg(s, rs);
9ee6e8bb 12295 switch (op) {
5a839c0d 12296 case 0:
9bb6558a 12297 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
12298 break;
12299 case 1:
9bb6558a 12300 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
12301 break;
12302 case 2:
9bb6558a 12303 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 12304 break;
2a0308c5 12305 default:
5a839c0d 12306 tcg_temp_free_i32(tmp);
12307 tcg_temp_free_i32(addr);
12308 goto illegal_op;
b7bcbe95 12309 }
5a839c0d 12310 tcg_temp_free_i32(tmp);
2c0262af 12311 }
9ee6e8bb 12312 if (postinc)
12313 tcg_gen_addi_i32(addr, addr, imm);
12314 if (writeback) {
12315 store_reg(s, rn, addr);
12316 } else {
7d1b0095 12317 tcg_temp_free_i32(addr);
b0109805 12318 }
12319 }
12320 break;
12321 default:
12322 goto illegal_op;
2c0262af 12323 }
2eea841c 12324 return;
9ee6e8bb 12325 illegal_op:
12326 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
12327 default_exception_el(s));
12328 }
12329
296e5a0a 12330 static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 12331 {
296e5a0a 12332 uint32_t val, op, rm, rn, rd, shift, cond;
12333 int32_t offset;
12334 int i;
12335 TCGv_i32 tmp;
12336 TCGv_i32 tmp2;
12337 TCGv_i32 addr;
99c475ab 12338
12339 switch (insn >> 12) {
12340 case 0: case 1:
396e467c 12341
12342 rd = insn & 7;
12343 op = (insn >> 11) & 3;
12344 if (op == 3) {
12345 /*
12346 * 0b0001_1xxx_xxxx_xxxx
12347 * - Add, subtract (three low registers)
12348 * - Add, subtract (two low registers and immediate)
12349 */
99c475ab 12350 rn = (insn >> 3) & 7;
396e467c 12351 tmp = load_reg(s, rn);
12352 if (insn & (1 << 10)) {
12353 /* immediate */
7d1b0095 12354 tmp2 = tcg_temp_new_i32();
396e467c 12355 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
12356 } else {
12357 /* reg */
12358 rm = (insn >> 6) & 7;
396e467c 12359 tmp2 = load_reg(s, rm);
99c475ab 12360 }
12361 if (insn & (1 << 9)) {
12362 if (s->condexec_mask)
396e467c 12363 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 12364 else
72485ec4 12365 gen_sub_CC(tmp, tmp, tmp2);
12366 } else {
12367 if (s->condexec_mask)
396e467c 12368 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 12369 else
72485ec4 12370 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 12371 }
7d1b0095 12372 tcg_temp_free_i32(tmp2);
396e467c 12373 store_reg(s, rd, tmp);
12374 } else {
12375 /* shift immediate */
12376 rm = (insn >> 3) & 7;
12377 shift = (insn >> 6) & 0x1f;
12378 tmp = load_reg(s, rm);
12379 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
12380 if (!s->condexec_mask)
12381 gen_logic_CC(tmp);
12382 store_reg(s, rd, tmp);
12383 }
12384 break;
12385 case 2: case 3:
12386 /*
12387 * 0b001x_xxxx_xxxx_xxxx
12388 * - Add, subtract, compare, move (one low register and immediate)
12389 */
12390 op = (insn >> 11) & 3;
12391 rd = (insn >> 8) & 0x7;
396e467c 12392 if (op == 0) { /* mov */
7d1b0095 12393 tmp = tcg_temp_new_i32();
396e467c 12394 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 12395 if (!s->condexec_mask)
12396 gen_logic_CC(tmp);
12397 store_reg(s, rd, tmp);
12398 } else {
12399 tmp = load_reg(s, rd);
7d1b0095 12400 tmp2 = tcg_temp_new_i32();
12401 tcg_gen_movi_i32(tmp2, insn & 0xff);
12402 switch (op) {
12403 case 1: /* cmp */
72485ec4 12404 gen_sub_CC(tmp, tmp, tmp2);
12405 tcg_temp_free_i32(tmp);
12406 tcg_temp_free_i32(tmp2);
12407 break;
12408 case 2: /* add */
12409 if (s->condexec_mask)
12410 tcg_gen_add_i32(tmp, tmp, tmp2);
12411 else
72485ec4 12412 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 12413 tcg_temp_free_i32(tmp2);
12414 store_reg(s, rd, tmp);
12415 break;
12416 case 3: /* sub */
12417 if (s->condexec_mask)
12418 tcg_gen_sub_i32(tmp, tmp, tmp2);
12419 else
72485ec4 12420 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 12421 tcg_temp_free_i32(tmp2);
12422 store_reg(s, rd, tmp);
12423 break;
12424 }
99c475ab 12425 }
12426 break;
12427 case 4:
12428 if (insn & (1 << 11)) {
12429 rd = (insn >> 8) & 7;
12430 /* load pc-relative. Bit 1 of PC is ignored. */
12431 val = s->pc + 2 + ((insn & 0xff) * 4);
12432 val &= ~(uint32_t)2;
7d1b0095 12433 addr = tcg_temp_new_i32();
b0109805 12434 tcg_gen_movi_i32(addr, val);
c40c8556 12435 tmp = tcg_temp_new_i32();
12436 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
12437 rd | ISSIs16Bit);
7d1b0095 12438 tcg_temp_free_i32(addr);
b0109805 12439 store_reg(s, rd, tmp);
12440 break;
12441 }
12442 if (insn & (1 << 10)) {
12443 /* 0b0100_01xx_xxxx_xxxx
12444 * - data processing extended, branch and exchange
12445 */
12446 rd = (insn & 7) | ((insn >> 4) & 8);
12447 rm = (insn >> 3) & 0xf;
12448 op = (insn >> 8) & 3;
12449 switch (op) {
12450 case 0: /* add */
12451 tmp = load_reg(s, rd);
12452 tmp2 = load_reg(s, rm);
12453 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 12454 tcg_temp_free_i32(tmp2);
12455 if (rd == 13) {
12456 /* ADD SP, SP, reg */
12457 store_sp_checked(s, tmp);
12458 } else {
12459 store_reg(s, rd, tmp);
12460 }
12461 break;
12462 case 1: /* cmp */
12463 tmp = load_reg(s, rd);
12464 tmp2 = load_reg(s, rm);
72485ec4 12465 gen_sub_CC(tmp, tmp, tmp2);
12466 tcg_temp_free_i32(tmp2);
12467 tcg_temp_free_i32(tmp);
12468 break;
12469 case 2: /* mov/cpy */
396e467c 12470 tmp = load_reg(s, rm);
12471 if (rd == 13) {
12472 /* MOV SP, reg */
12473 store_sp_checked(s, tmp);
12474 } else {
12475 store_reg(s, rd, tmp);
12476 }
99c475ab 12477 break;
12478 case 3:
12479 {
12480 /* 0b0100_0111_xxxx_xxxx
12481 * - branch [and link] exchange thumb register
12482 */
12483 bool link = insn & (1 << 7);
12484
fb602cb7 12485 if (insn & 3) {
12486 goto undef;
12487 }
12488 if (link) {
be5e7a76 12489 ARCH(5);
ebfe27c5 12490 }
12491 if ((insn & 4)) {
12492 /* BXNS/BLXNS: only exists for v8M with the
12493 * security extensions, and always UNDEF if NonSecure.
12494 * We don't implement these in the user-only mode
12495 * either (in theory you can use them from Secure User
12496 * mode but they are too tied in to system emulation.)
12497 */
12498 if (!s->v8m_secure || IS_USER_ONLY) {
12499 goto undef;
12500 }
12501 if (link) {
3e3fa230 12502 gen_blxns(s, rm);
12503 } else {
12504 gen_bxns(s, rm);
12505 }
12506 break;
12507 }
12508 /* BLX/BX */
12509 tmp = load_reg(s, rm);
12510 if (link) {
99c475ab 12511 val = (uint32_t)s->pc | 1;
7d1b0095 12512 tmp2 = tcg_temp_new_i32();
12513 tcg_gen_movi_i32(tmp2, val);
12514 store_reg(s, 14, tmp2);
12515 gen_bx(s, tmp);
12516 } else {
12517 /* Only BX works as exception-return, not BLX */
12518 gen_bx_excret(s, tmp);
99c475ab 12519 }
12520 break;
12521 }
ebfe27c5 12522 }
12523 break;
12524 }
12525
12526 /*
12527 * 0b0100_00xx_xxxx_xxxx
12528 * - Data-processing (two low registers)
12529 */
12530 rd = insn & 7;
12531 rm = (insn >> 3) & 7;
12532 op = (insn >> 6) & 0xf;
12533 if (op == 2 || op == 3 || op == 4 || op == 7) {
12534 /* the shift/rotate ops want the operands backwards */
12535 val = rm;
12536 rm = rd;
12537 rd = val;
12538 val = 1;
12539 } else {
12540 val = 0;
12541 }
12542
396e467c 12543 if (op == 9) { /* neg */
7d1b0095 12544 tmp = tcg_temp_new_i32();
12545 tcg_gen_movi_i32(tmp, 0);
12546 } else if (op != 0xf) { /* mvn doesn't read its first operand */
12547 tmp = load_reg(s, rd);
12548 } else {
f764718d 12549 tmp = NULL;
396e467c 12550 }
99c475ab 12551
396e467c 12552 tmp2 = load_reg(s, rm);
5899f386 12553 switch (op) {
99c475ab 12554 case 0x0: /* and */
396e467c 12555 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 12556 if (!s->condexec_mask)
396e467c 12557 gen_logic_CC(tmp);
12558 break;
12559 case 0x1: /* eor */
396e467c 12560 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 12561 if (!s->condexec_mask)
396e467c 12562 gen_logic_CC(tmp);
12563 break;
12564 case 0x2: /* lsl */
9ee6e8bb 12565 if (s->condexec_mask) {
365af80e 12566 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 12567 } else {
9ef39277 12568 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12569 gen_logic_CC(tmp2);
9ee6e8bb 12570 }
12571 break;
12572 case 0x3: /* lsr */
9ee6e8bb 12573 if (s->condexec_mask) {
365af80e 12574 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 12575 } else {
9ef39277 12576 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12577 gen_logic_CC(tmp2);
9ee6e8bb 12578 }
12579 break;
12580 case 0x4: /* asr */
9ee6e8bb 12581 if (s->condexec_mask) {
365af80e 12582 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 12583 } else {
9ef39277 12584 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12585 gen_logic_CC(tmp2);
9ee6e8bb 12586 }
12587 break;
12588 case 0x5: /* adc */
49b4c31e 12589 if (s->condexec_mask) {
396e467c 12590 gen_adc(tmp, tmp2);
12591 } else {
12592 gen_adc_CC(tmp, tmp, tmp2);
12593 }
12594 break;
12595 case 0x6: /* sbc */
2de68a49 12596 if (s->condexec_mask) {
396e467c 12597 gen_sub_carry(tmp, tmp, tmp2);
12598 } else {
12599 gen_sbc_CC(tmp, tmp, tmp2);
12600 }
12601 break;
12602 case 0x7: /* ror */
9ee6e8bb 12603 if (s->condexec_mask) {
12604 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12605 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 12606 } else {
9ef39277 12607 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12608 gen_logic_CC(tmp2);
9ee6e8bb 12609 }
12610 break;
12611 case 0x8: /* tst */
12612 tcg_gen_and_i32(tmp, tmp, tmp2);
12613 gen_logic_CC(tmp);
99c475ab 12614 rd = 16;
5899f386 12615 break;
99c475ab 12616 case 0x9: /* neg */
9ee6e8bb 12617 if (s->condexec_mask)
396e467c 12618 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 12619 else
72485ec4 12620 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12621 break;
12622 case 0xa: /* cmp */
72485ec4 12623 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12624 rd = 16;
12625 break;
12626 case 0xb: /* cmn */
72485ec4 12627 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
12628 rd = 16;
12629 break;
12630 case 0xc: /* orr */
396e467c 12631 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 12632 if (!s->condexec_mask)
396e467c 12633 gen_logic_CC(tmp);
99c475ab
FB
12634 break;
12635 case 0xd: /* mul */
7b2919a0 12636 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 12637 if (!s->condexec_mask)
396e467c 12638 gen_logic_CC(tmp);
99c475ab
FB
12639 break;
12640 case 0xe: /* bic */
f669df27 12641 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 12642 if (!s->condexec_mask)
396e467c 12643 gen_logic_CC(tmp);
99c475ab
FB
12644 break;
12645 case 0xf: /* mvn */
396e467c 12646 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 12647 if (!s->condexec_mask)
396e467c 12648 gen_logic_CC(tmp2);
99c475ab 12649 val = 1;
5899f386 12650 rm = rd;
99c475ab
FB
12651 break;
12652 }
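        /* Write-back: rd == 16 marks the flag-setting compares (tst/cmp/cmn),
         * which have no destination; val == 1 means the result was computed
         * into tmp2 and is stored to rm instead. */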
12653 if (rd != 16) {
396e467c
FN
12654 if (val) {
12655 store_reg(s, rm, tmp2);
12656 if (op != 0xf)
7d1b0095 12657 tcg_temp_free_i32(tmp);
396e467c
FN
12658 } else {
12659 store_reg(s, rd, tmp);
7d1b0095 12660 tcg_temp_free_i32(tmp2);
396e467c
FN
12661 }
12662 } else {
7d1b0095
PM
12663 tcg_temp_free_i32(tmp);
12664 tcg_temp_free_i32(tmp2);
99c475ab
FB
12665 }
12666 break;
12667
12668 case 5:
12669 /* load/store register offset. */
12670 rd = insn & 7;
12671 rn = (insn >> 3) & 7;
12672 rm = (insn >> 6) & 7;
12673 op = (insn >> 9) & 7;
b0109805 12674 addr = load_reg(s, rn);
b26eefb6 12675 tmp = load_reg(s, rm);
b0109805 12676 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12677 tcg_temp_free_i32(tmp);
99c475ab 12678
c40c8556 12679 if (op < 3) { /* store */
b0109805 12680 tmp = load_reg(s, rd);
c40c8556
PM
12681 } else {
12682 tmp = tcg_temp_new_i32();
12683 }
99c475ab
FB
12684
12685 switch (op) {
12686 case 0: /* str */
9bb6558a 12687 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12688 break;
12689 case 1: /* strh */
9bb6558a 12690 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12691 break;
12692 case 2: /* strb */
9bb6558a 12693 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12694 break;
12695 case 3: /* ldrsb */
9bb6558a 12696 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12697 break;
12698 case 4: /* ldr */
9bb6558a 12699 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12700 break;
12701 case 5: /* ldrh */
9bb6558a 12702 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12703 break;
12704 case 6: /* ldrb */
9bb6558a 12705 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12706 break;
12707 case 7: /* ldrsh */
9bb6558a 12708 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12709 break;
12710 }
c40c8556 12711 if (op >= 3) { /* load */
b0109805 12712 store_reg(s, rd, tmp);
c40c8556
PM
12713 } else {
12714 tcg_temp_free_i32(tmp);
12715 }
7d1b0095 12716 tcg_temp_free_i32(addr);
99c475ab
FB
12717 break;
12718
12719 case 6:
12720 /* load/store word immediate offset */
12721 rd = insn & 7;
12722 rn = (insn >> 3) & 7;
b0109805 12723 addr = load_reg(s, rn);
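        /* imm5 lives in bits [10:6]; (insn >> 4) & 0x7c extracts it
         * already scaled by the word size, i.e. imm5 * 4 */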
99c475ab 12724 val = (insn >> 4) & 0x7c;
b0109805 12725 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12726
12727 if (insn & (1 << 11)) {
12728 /* load */
c40c8556 12729 tmp = tcg_temp_new_i32();
12dcc321 12730 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12731 store_reg(s, rd, tmp);
99c475ab
FB
12732 } else {
12733 /* store */
b0109805 12734 tmp = load_reg(s, rd);
12dcc321 12735 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12736 tcg_temp_free_i32(tmp);
99c475ab 12737 }
7d1b0095 12738 tcg_temp_free_i32(addr);
99c475ab
FB
12739 break;
12740
12741 case 7:
12742 /* load/store byte immediate offset */
12743 rd = insn & 7;
12744 rn = (insn >> 3) & 7;
b0109805 12745 addr = load_reg(s, rn);
99c475ab 12746 val = (insn >> 6) & 0x1f;
b0109805 12747 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12748
12749 if (insn & (1 << 11)) {
12750 /* load */
c40c8556 12751 tmp = tcg_temp_new_i32();
9bb6558a 12752 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12753 store_reg(s, rd, tmp);
99c475ab
FB
12754 } else {
12755 /* store */
b0109805 12756 tmp = load_reg(s, rd);
9bb6558a 12757 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12758 tcg_temp_free_i32(tmp);
99c475ab 12759 }
7d1b0095 12760 tcg_temp_free_i32(addr);
99c475ab
FB
12761 break;
12762
12763 case 8:
12764 /* load/store halfword immediate offset */
12765 rd = insn & 7;
12766 rn = (insn >> 3) & 7;
b0109805 12767 addr = load_reg(s, rn);
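        /* imm5 in bits [10:6], pre-scaled by 2: (insn >> 5) & 0x3e == imm5 * 2 */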
99c475ab 12768 val = (insn >> 5) & 0x3e;
b0109805 12769 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12770
12771 if (insn & (1 << 11)) {
12772 /* load */
c40c8556 12773 tmp = tcg_temp_new_i32();
9bb6558a 12774 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12775 store_reg(s, rd, tmp);
99c475ab
FB
12776 } else {
12777 /* store */
b0109805 12778 tmp = load_reg(s, rd);
9bb6558a 12779 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12780 tcg_temp_free_i32(tmp);
99c475ab 12781 }
7d1b0095 12782 tcg_temp_free_i32(addr);
99c475ab
FB
12783 break;
12784
12785 case 9:
12786 /* load/store from stack */
12787 rd = (insn >> 8) & 7;
b0109805 12788 addr = load_reg(s, 13);
99c475ab 12789 val = (insn & 0xff) * 4;
b0109805 12790 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
12791
12792 if (insn & (1 << 11)) {
12793 /* load */
c40c8556 12794 tmp = tcg_temp_new_i32();
9bb6558a 12795 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 12796 store_reg(s, rd, tmp);
99c475ab
FB
12797 } else {
12798 /* store */
b0109805 12799 tmp = load_reg(s, rd);
9bb6558a 12800 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 12801 tcg_temp_free_i32(tmp);
99c475ab 12802 }
7d1b0095 12803 tcg_temp_free_i32(addr);
99c475ab
FB
12804 break;
12805
12806 case 10:
55203189
PM
12807 /*
12808 * 0b1010_xxxx_xxxx_xxxx
12809 * - Add PC/SP (immediate)
12810 */
99c475ab 12811 rd = (insn >> 8) & 7;
5899f386
FB
12812 if (insn & (1 << 11)) {
12813 /* SP */
5e3f878a 12814 tmp = load_reg(s, 13);
5899f386
FB
12815 } else {
12816 /* PC. bit 1 is ignored. */
7d1b0095 12817 tmp = tcg_temp_new_i32();
5e3f878a 12818 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 12819 }
99c475ab 12820 val = (insn & 0xff) * 4;
5e3f878a
PB
12821 tcg_gen_addi_i32(tmp, tmp, val);
12822 store_reg(s, rd, tmp);
99c475ab
FB
12823 break;
12824
12825 case 11:
12826 /* misc */
12827 op = (insn >> 8) & 0xf;
12828 switch (op) {
12829 case 0:
55203189
PM
12830 /*
12831 * 0b1011_0000_xxxx_xxxx
12832 * - ADD (SP plus immediate)
12833 * - SUB (SP minus immediate)
12834 */
b26eefb6 12835 tmp = load_reg(s, 13);
99c475ab
FB
12836 val = (insn & 0x7f) * 4;
12837 if (insn & (1 << 7))
6a0d8a1d 12838 val = -(int32_t)val;
b26eefb6 12839 tcg_gen_addi_i32(tmp, tmp, val);
55203189 12840 store_sp_checked(s, tmp);
99c475ab
FB
12841 break;
12842
9ee6e8bb
PB
12843 case 2: /* sign/zero extend. */
12844 ARCH(6);
12845 rd = insn & 7;
12846 rm = (insn >> 3) & 7;
b0109805 12847 tmp = load_reg(s, rm);
9ee6e8bb 12848 switch ((insn >> 6) & 3) {
b0109805
PB
12849 case 0: gen_sxth(tmp); break;
12850 case 1: gen_sxtb(tmp); break;
12851 case 2: gen_uxth(tmp); break;
12852 case 3: gen_uxtb(tmp); break;
9ee6e8bb 12853 }
b0109805 12854 store_reg(s, rd, tmp);
9ee6e8bb 12855 break;
99c475ab 12856 case 4: case 5: case 0xc: case 0xd:
aa369e5c
PM
12857 /*
12858 * 0b1011_x10x_xxxx_xxxx
12859 * - push/pop
12860 */
b0109805 12861 addr = load_reg(s, 13);
5899f386
FB
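            /* bit 8 is the 'R' bit: a push additionally stores LR,
             * a pop additionally loads PC */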
12862 if (insn & (1 << 8))
12863 offset = 4;
99c475ab 12864 else
5899f386
FB
12865 offset = 0;
12866 for (i = 0; i < 8; i++) {
12867 if (insn & (1 << i))
12868 offset += 4;
12869 }
12870 if ((insn & (1 << 11)) == 0) {
b0109805 12871 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12872 }
aa369e5c
PM
12873
12874 if (s->v8m_stackcheck) {
12875 /*
12876 * Here 'addr' is the lower of "old SP" and "new SP";
12877 * if this is a pop that starts below the limit and ends
12878 * above it, it is UNKNOWN whether the limit check triggers;
12879 * we choose to trigger.
12880 */
12881 gen_helper_v8m_stackcheck(cpu_env, addr);
12882 }
12883
99c475ab
FB
12884 for (i = 0; i < 8; i++) {
12885 if (insn & (1 << i)) {
12886 if (insn & (1 << 11)) {
12887 /* pop */
c40c8556 12888 tmp = tcg_temp_new_i32();
12dcc321 12889 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 12890 store_reg(s, i, tmp);
99c475ab
FB
12891 } else {
12892 /* push */
b0109805 12893 tmp = load_reg(s, i);
12dcc321 12894 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12895 tcg_temp_free_i32(tmp);
99c475ab 12896 }
5899f386 12897 /* advance to the next address. */
b0109805 12898 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
12899 }
12900 }
f764718d 12901 tmp = NULL;
99c475ab
FB
12902 if (insn & (1 << 8)) {
12903 if (insn & (1 << 11)) {
12904 /* pop pc */
c40c8556 12905 tmp = tcg_temp_new_i32();
12dcc321 12906 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
12907 /* don't set the pc until the rest of the instruction
12908 has completed */
12909 } else {
12910 /* push lr */
b0109805 12911 tmp = load_reg(s, 14);
12dcc321 12912 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 12913 tcg_temp_free_i32(tmp);
99c475ab 12914 }
b0109805 12915 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 12916 }
5899f386 12917 if ((insn & (1 << 11)) == 0) {
b0109805 12918 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 12919 }
99c475ab 12920 /* write back the new stack pointer */
b0109805 12921 store_reg(s, 13, addr);
99c475ab 12922 /* set the new PC value */
be5e7a76 12923 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 12924 store_reg_from_load(s, 15, tmp);
be5e7a76 12925 }
99c475ab
FB
12926 break;
12927
9ee6e8bb
PB
12928 case 1: case 3: case 9: case 11: /* czb */
12929 rm = insn & 7;
d9ba4830 12930 tmp = load_reg(s, rm);
c2d9644e 12931 arm_gen_condlabel(s);
9ee6e8bb 12932 if (insn & (1 << 11))
cb63669a 12933 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 12934 else
cb63669a 12935 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 12936 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
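            /* branch offset is i:imm5:'0' (bit 9 and bits [7:3]): a forward
             * offset of 0..126 bytes from the architectural PC (insn + 4) */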
12937 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
12938 val = (uint32_t)s->pc + 2;
12939 val += offset;
12940 gen_jmp(s, val);
12941 break;
12942
12943 case 15: /* IT, nop-hint. */
12944 if ((insn & 0xf) == 0) {
12945 gen_nop_hint(s, (insn >> 4) & 0xf);
12946 break;
12947 }
12948 /* If Then. */
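            /* Internal encoding: condexec_cond keeps firstcond with its low
             * bit cleared; that bit becomes the top bit of the 5-bit
             * condexec_mask, from which each insn's condition is rebuilt. */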
12949 s->condexec_cond = (insn >> 4) & 0xe;
12950 s->condexec_mask = insn & 0x1f;
12951 /* No actual code generated for this insn, just setup state. */
12952 break;
12953
06c949e6 12954 case 0xe: /* bkpt */
d4a2dc67
PM
12955 {
12956 int imm8 = extract32(insn, 0, 8);
be5e7a76 12957 ARCH(5);
c900a2e6 12958 gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
06c949e6 12959 break;
d4a2dc67 12960 }
06c949e6 12961
19a6e31c
PM
12962 case 0xa: /* rev, and hlt */
12963 {
12964 int op1 = extract32(insn, 6, 2);
12965
12966 if (op1 == 2) {
12967 /* HLT */
12968 int imm6 = extract32(insn, 0, 6);
12969
12970 gen_hlt(s, imm6);
12971 break;
12972 }
12973
12974 /* Otherwise this is rev */
9ee6e8bb
PB
12975 ARCH(6);
12976 rn = (insn >> 3) & 0x7;
12977 rd = insn & 0x7;
b0109805 12978 tmp = load_reg(s, rn);
19a6e31c 12979 switch (op1) {
66896cb8 12980 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
12981 case 1: gen_rev16(tmp); break;
12982 case 3: gen_revsh(tmp); break;
19a6e31c
PM
12983 default:
12984 g_assert_not_reached();
9ee6e8bb 12985 }
b0109805 12986 store_reg(s, rd, tmp);
9ee6e8bb 12987 break;
19a6e31c 12988 }
9ee6e8bb 12989
d9e028c1
PM
12990 case 6:
12991 switch ((insn >> 5) & 7) {
12992 case 2:
12993 /* setend */
12994 ARCH(6);
9886ecdf
PB
12995 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
12996 gen_helper_setend(cpu_env);
dcba3a8d 12997 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 12998 }
9ee6e8bb 12999 break;
d9e028c1
PM
13000 case 3:
13001 /* cps */
13002 ARCH(6);
13003 if (IS_USER(s)) {
13004 break;
8984bd2e 13005 }
b53d8923 13006 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
13007 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
13008 /* FAULTMASK */
13009 if (insn & 1) {
13010 addr = tcg_const_i32(19);
13011 gen_helper_v7m_msr(cpu_env, addr, tmp);
13012 tcg_temp_free_i32(addr);
13013 }
13014 /* PRIMASK */
13015 if (insn & 2) {
13016 addr = tcg_const_i32(16);
13017 gen_helper_v7m_msr(cpu_env, addr, tmp);
13018 tcg_temp_free_i32(addr);
13019 }
13020 tcg_temp_free_i32(tmp);
13021 gen_lookup_tb(s);
13022 } else {
13023 if (insn & (1 << 4)) {
13024 shift = CPSR_A | CPSR_I | CPSR_F;
13025 } else {
13026 shift = 0;
13027 }
13028 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 13029 }
d9e028c1
PM
13030 break;
13031 default:
13032 goto undef;
9ee6e8bb
PB
13033 }
13034 break;
13035
99c475ab
FB
13036 default:
13037 goto undef;
13038 }
13039 break;
13040
13041 case 12:
a7d3970d 13042 {
99c475ab 13043 /* load/store multiple */
f764718d 13044 TCGv_i32 loaded_var = NULL;
99c475ab 13045 rn = (insn >> 8) & 0x7;
b0109805 13046 addr = load_reg(s, rn);
99c475ab
FB
13047 for (i = 0; i < 8; i++) {
13048 if (insn & (1 << i)) {
99c475ab
FB
13049 if (insn & (1 << 11)) {
13050 /* load */
c40c8556 13051 tmp = tcg_temp_new_i32();
12dcc321 13052 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
13053 if (i == rn) {
13054 loaded_var = tmp;
13055 } else {
13056 store_reg(s, i, tmp);
13057 }
99c475ab
FB
13058 } else {
13059 /* store */
b0109805 13060 tmp = load_reg(s, i);
12dcc321 13061 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 13062 tcg_temp_free_i32(tmp);
99c475ab 13063 }
5899f386 13064 /* advance to the next address */
b0109805 13065 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
13066 }
13067 }
b0109805 13068 if ((insn & (1 << rn)) == 0) {
a7d3970d 13069 /* base reg not in list: base register writeback */
b0109805
PB
13070 store_reg(s, rn, addr);
13071 } else {
a7d3970d
PM
13072 /* base reg in list: if load, complete it now */
13073 if (insn & (1 << 11)) {
13074 store_reg(s, rn, loaded_var);
13075 }
7d1b0095 13076 tcg_temp_free_i32(addr);
b0109805 13077 }
99c475ab 13078 break;
a7d3970d 13079 }
99c475ab
FB
13080 case 13:
13081 /* conditional branch or swi */
13082 cond = (insn >> 8) & 0xf;
13083 if (cond == 0xe)
13084 goto undef;
13085
13086 if (cond == 0xf) {
13087 /* swi */
eaed129d 13088 gen_set_pc_im(s, s->pc);
d4a2dc67 13089 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 13090 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
13091 break;
13092 }
13093 /* generate a conditional jump to next instruction */
c2d9644e 13094 arm_skip_unless(s, cond);
99c475ab
FB
13095
13096 /* jump to the offset */
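            /* imm8 is sign-extended and scaled by 2; the base is the
             * architectural PC, i.e. this insn's address + 4 (s->pc + 2) */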
5899f386 13097 val = (uint32_t)s->pc + 2;
99c475ab 13098 offset = ((int32_t)insn << 24) >> 24;
5899f386 13099 val += offset << 1;
8aaca4c0 13100 gen_jmp(s, val);
99c475ab
FB
13101 break;
13102
13103 case 14:
358bf29e 13104 if (insn & (1 << 11)) {
296e5a0a
PM
13105 /* thumb_insn_is_16bit() ensures we can't get here for
13106 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
13107 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
13108 */
13109 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
13110 ARCH(5);
13111 offset = ((insn & 0x7ff) << 1);
13112 tmp = load_reg(s, 14);
13113 tcg_gen_addi_i32(tmp, tmp, offset);
13114 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
13115
13116 tmp2 = tcg_temp_new_i32();
13117 tcg_gen_movi_i32(tmp2, s->pc | 1);
13118 store_reg(s, 14, tmp2);
13119 gen_bx(s, tmp);
358bf29e
PB
13120 break;
13121 }
9ee6e8bb 13122 /* unconditional branch */
99c475ab
FB
13123 val = (uint32_t)s->pc;
13124 offset = ((int32_t)insn << 21) >> 21;
13125 val += (offset << 1) + 2;
8aaca4c0 13126 gen_jmp(s, val);
99c475ab
FB
13127 break;
13128
13129 case 15:
296e5a0a
PM
13130 /* thumb_insn_is_16bit() ensures we can't get here for
13131 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
13132 */
13133 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
13134
13135 if (insn & (1 << 11)) {
13136 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
13137 offset = ((insn & 0x7ff) << 1) | 1;
13138 tmp = load_reg(s, 14);
13139 tcg_gen_addi_i32(tmp, tmp, offset);
13140
13141 tmp2 = tcg_temp_new_i32();
13142 tcg_gen_movi_i32(tmp2, s->pc | 1);
13143 store_reg(s, 14, tmp2);
13144 gen_bx(s, tmp);
13145 } else {
13146 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
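            /* The prefix parks PC + (signed imm11 << 12) in LR; the suffix
             * insn executed next adds in the low half of the offset. */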
13147 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
13148
13149 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
13150 }
9ee6e8bb 13151 break;
99c475ab
FB
13152 }
13153 return;
9ee6e8bb 13154illegal_op:
99c475ab 13155undef:
73710361
GB
13156 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
13157 default_exception_el(s));
99c475ab
FB
13158}
13159
541ebcd4
PM
13160static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
13161{
13162 /* Return true if the insn at dc->pc might cross a page boundary.
13163 * (False positives are OK, false negatives are not.)
5b8d7289
PM
13164 * We know this is a Thumb insn, and our caller ensures we are
13165 * only called if dc->pc is less than 4 bytes from the page
13166 * boundary, so we cross the page if the first 16 bits indicate
13167 * that this is a 32 bit insn.
541ebcd4 13168 */
5b8d7289 13169 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 13170
5b8d7289 13171 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
13172}
13173
b542683d 13174static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
2c0262af 13175{
1d8a5535 13176 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 13177 CPUARMState *env = cs->env_ptr;
4e5e1215 13178 ARMCPU *cpu = arm_env_get_cpu(env);
aad821ac
RH
13179 uint32_t tb_flags = dc->base.tb->flags;
13180 uint32_t condexec, core_mmu_idx;
3b46e624 13181
962fcbf2 13182 dc->isar = &cpu->isar;
dcba3a8d 13183 dc->pc = dc->base.pc_first;
e50e6a20 13184 dc->condjmp = 0;
3926cc84 13185
40f860cd 13186 dc->aarch64 = 0;
cef9ee70
SS
13187 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
13188 * there is no secure EL1, so we route exceptions to EL3.
13189 */
13190 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
13191 !arm_el_is_aa64(env, 3);
aad821ac
RH
13192 dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
13193 dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
13194 dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
13195 condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
13196 dc->condexec_mask = (condexec & 0xf) << 1;
13197 dc->condexec_cond = condexec >> 4;
13198 core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
13199 dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
c1e37810 13200 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 13201#if !defined(CONFIG_USER_ONLY)
c1e37810 13202 dc->user = (dc->current_el == 0);
3926cc84 13203#endif
aad821ac
RH
13204 dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
13205 dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
13206 dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
13207 dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
13208 dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
13209 dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
13210 dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
fb602cb7
PM
13211 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13212 regime_is_secure(env, dc->mmu_idx);
aad821ac 13213 dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
60322b39 13214 dc->cp_regs = cpu->cp_regs;
a984e42c 13215 dc->features = env->features;
40f860cd 13216
50225ad0
PM
13217 /* Single step state. The code-generation logic here is:
13218 * SS_ACTIVE == 0:
13219 * generate code with no special handling for single-stepping (except
13220 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
13221 * this happens anyway because those changes are all system register or
13222 * PSTATE writes).
13223 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
13224 * emit code for one insn
13225 * emit code to clear PSTATE.SS
13226 * emit code to generate software step exception for completed step
13227 * end TB (as usual for having generated an exception)
13228 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
13229 * emit code to generate a software step exception
13230 * end the TB
13231 */
aad821ac
RH
13232 dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
13233 dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
50225ad0
PM
13234 dc->is_ldex = false;
13235 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
13236
bfe7ad5b 13237 dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
1d8a5535 13238
f7708456
RH
13239 /* If architectural single step active, limit to 1. */
13240 if (is_singlestepping(dc)) {
b542683d 13241 dc->base.max_insns = 1;
f7708456
RH
13242 }
13243
d0264d86
RH
13244 /* ARM is a fixed-length ISA. Bound the number of insns to execute
13245 to those left on the page. */
13246 if (!dc->thumb) {
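        /* -(pc | TARGET_PAGE_MASK) is the number of bytes remaining on
         * this page, and each A32 insn occupies 4 of them */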
bfe7ad5b 13247 int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 13248 dc->base.max_insns = MIN(dc->base.max_insns, bound);
d0264d86
RH
13249 }
13250
a7812ae4
PB
13251 cpu_F0s = tcg_temp_new_i32();
13252 cpu_F1s = tcg_temp_new_i32();
13253 cpu_F0d = tcg_temp_new_i64();
13254 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
13255 cpu_V0 = cpu_F0d;
13256 cpu_V1 = cpu_F1d;
e677137d 13257 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 13258 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
13259}
13260
b1476854
LV
13261static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
13262{
13263 DisasContext *dc = container_of(dcbase, DisasContext, base);
13264
13265 /* A note on handling of the condexec (IT) bits:
13266 *
13267 * We want to avoid the overhead of having to write the updated condexec
13268 * bits back to the CPUARMState for every instruction in an IT block. So:
13269 * (1) if the condexec bits are not already zero then we write
13270 * zero back into the CPUARMState now. This avoids complications trying
13271 * to do it at the end of the block. (For example if we don't do this
13272 * it's hard to identify whether we can safely skip writing condexec
13273 * at the end of the TB, which we definitely want to do for the case
13274 * where a TB doesn't do anything with the IT state at all.)
13275 * (2) if we are going to leave the TB then we call gen_set_condexec()
13276 * which will write the correct value into CPUARMState if zero is wrong.
13277 * This is done both for leaving the TB at the end, and for leaving
13278 * it because of an exception we know will happen, which is done in
13279 * gen_exception_insn(). The latter is necessary because we need to
13280 * leave the TB with the PC/IT state just prior to execution of the
13281 * instruction which caused the exception.
13282 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
13283 * then the CPUARMState will be wrong and we need to reset it.
13284 * This is handled in the same way as restoration of the
13285 * PC in these situations; we save the value of the condexec bits
13286 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
13287 * then uses this to restore them after an exception.
13288 *
13289 * Note that there are no instructions which can read the condexec
13290 * bits, and none which can write non-static values to them, so
13291 * we don't need to care about whether CPUARMState is correct in the
13292 * middle of a TB.
13293 */
13294
13295 /* Reset the conditional execution bits immediately. This avoids
13296 complications trying to do it at the end of the block. */
13297 if (dc->condexec_mask || dc->condexec_cond) {
13298 TCGv_i32 tmp = tcg_temp_new_i32();
13299 tcg_gen_movi_i32(tmp, 0);
13300 store_cpu_field(tmp, condexec_bits);
13301 }
13302}
13303
f62bd897
LV
13304static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
13305{
13306 DisasContext *dc = container_of(dcbase, DisasContext, base);
13307
f62bd897
LV
13308 tcg_gen_insn_start(dc->pc,
13309 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
13310 0);
15fa08f8 13311 dc->insn_start = tcg_last_op();
f62bd897
LV
13312}
13313
a68956ad
LV
13314static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
13315 const CPUBreakpoint *bp)
13316{
13317 DisasContext *dc = container_of(dcbase, DisasContext, base);
13318
13319 if (bp->flags & BP_CPU) {
13320 gen_set_condexec(dc);
13321 gen_set_pc_im(dc, dc->pc);
13322 gen_helper_check_breakpoints(cpu_env);
13323 /* End the TB early; it's likely not going to be executed */
13324 dc->base.is_jmp = DISAS_TOO_MANY;
13325 } else {
13326 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
13327 /* The address covered by the breakpoint must be
13328 included in [tb->pc, tb->pc + tb->size) in order
 13329 for it to be properly cleared -- thus we
13330 increment the PC here so that the logic setting
13331 tb->size below does the right thing. */
13332 /* TODO: Advance PC by correct instruction length to
13333 * avoid disassembler error messages */
13334 dc->pc += 2;
13335 dc->base.is_jmp = DISAS_NORETURN;
13336 }
13337
13338 return true;
13339}
13340
722ef0a5 13341static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 13342{
13189a90
LV
13343#ifdef CONFIG_USER_ONLY
13344 /* Intercept jump to the magic kernel page. */
13345 if (dc->pc >= 0xffff0000) {
13346 /* We always get here via a jump, so know we are not in a
13347 conditional execution block. */
13348 gen_exception_internal(EXCP_KERNEL_TRAP);
13349 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 13350 return true;
13189a90
LV
13351 }
13352#endif
13353
13354 if (dc->ss_active && !dc->pstate_ss) {
13355 /* Singlestep state is Active-pending.
13356 * If we're in this state at the start of a TB then either
13357 * a) we just took an exception to an EL which is being debugged
13358 * and this is the first insn in the exception handler
13359 * b) debug exceptions were masked and we just unmasked them
13360 * without changing EL (eg by clearing PSTATE.D)
13361 * In either case we're going to take a swstep exception in the
13362 * "did not step an insn" case, and so the syndrome ISV and EX
13363 * bits should be zero.
13364 */
13365 assert(dc->base.num_insns == 1);
13366 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
13367 default_exception_el(dc));
13368 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 13369 return true;
13189a90
LV
13370 }
13371
722ef0a5
RH
13372 return false;
13373}
13189a90 13374
d0264d86 13375static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 13376{
13189a90
LV
13377 if (dc->condjmp && !dc->base.is_jmp) {
13378 gen_set_label(dc->condlabel);
13379 dc->condjmp = 0;
13380 }
13189a90 13381 dc->base.pc_next = dc->pc;
23169224 13382 translator_loop_temp_check(&dc->base);
13189a90
LV
13383}
13384
722ef0a5
RH
13385static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13386{
13387 DisasContext *dc = container_of(dcbase, DisasContext, base);
13388 CPUARMState *env = cpu->env_ptr;
13389 unsigned int insn;
13390
13391 if (arm_pre_translate_insn(dc)) {
13392 return;
13393 }
13394
13395 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 13396 dc->insn = insn;
722ef0a5
RH
13397 dc->pc += 4;
13398 disas_arm_insn(dc, insn);
13399
d0264d86
RH
13400 arm_post_translate_insn(dc);
13401
13402 /* ARM is a fixed-length ISA. We performed the cross-page check
13403 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
13404}
13405
dcf14dfb
PM
13406static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
13407{
13408 /* Return true if this Thumb insn is always unconditional,
13409 * even inside an IT block. This is true of only a very few
13410 * instructions: BKPT, HLT, and SG.
13411 *
13412 * A larger class of instructions are UNPREDICTABLE if used
13413 * inside an IT block; we do not need to detect those here, because
13414 * what we do by default (perform the cc check and update the IT
13415 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
13416 * choice for those situations.
13417 *
13418 * insn is either a 16-bit or a 32-bit instruction; the two are
13419 * distinguishable because for the 16-bit case the top 16 bits
13420 * are zeroes, and that isn't a valid 32-bit encoding.
13421 */
13422 if ((insn & 0xffffff00) == 0xbe00) {
13423 /* BKPT */
13424 return true;
13425 }
13426
13427 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
13428 !arm_dc_feature(s, ARM_FEATURE_M)) {
13429 /* HLT: v8A only. This is unconditional even when it is going to
13430 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
13431 * For v7 cores this was a plain old undefined encoding and so
13432 * honours its cc check. (We might be using the encoding as
13433 * a semihosting trap, but we don't change the cc check behaviour
13434 * on that account, because a debugger connected to a real v7A
13435 * core and emulating semihosting traps by catching the UNDEF
13436 * exception would also only see cases where the cc check passed.
13437 * No guest code should be trying to do a HLT semihosting trap
13438 * in an IT block anyway.
13439 */
13440 return true;
13441 }
13442
13443 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
13444 arm_dc_feature(s, ARM_FEATURE_M)) {
13445 /* SG: v8M only */
13446 return true;
13447 }
13448
13449 return false;
13450}
13451
722ef0a5
RH
13452static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
13453{
13454 DisasContext *dc = container_of(dcbase, DisasContext, base);
13455 CPUARMState *env = cpu->env_ptr;
296e5a0a
PM
13456 uint32_t insn;
13457 bool is_16bit;
722ef0a5
RH
13458
13459 if (arm_pre_translate_insn(dc)) {
13460 return;
13461 }
13462
296e5a0a
PM
13463 insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13464 is_16bit = thumb_insn_is_16bit(dc, insn);
13465 dc->pc += 2;
13466 if (!is_16bit) {
13467 uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
13468
13469 insn = insn << 16 | insn2;
13470 dc->pc += 2;
13471 }
58803318 13472 dc->insn = insn;
296e5a0a 13473
dcf14dfb 13474 if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
296e5a0a
PM
13475 uint32_t cond = dc->condexec_cond;
13476
13477 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
c2d9644e 13478 arm_skip_unless(dc, cond);
296e5a0a
PM
13479 }
13480 }
13481
13482 if (is_16bit) {
13483 disas_thumb_insn(dc, insn);
13484 } else {
2eea841c 13485 disas_thumb2_insn(dc, insn);
296e5a0a 13486 }
722ef0a5
RH
13487
13488 /* Advance the Thumb condexec condition. */
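    /* The top bit of the 5-bit mask supplies the low condition bit for the
     * next insn; shifting consumes it, and an empty mask ends the IT block. */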
13489 if (dc->condexec_mask) {
13490 dc->condexec_cond = ((dc->condexec_cond & 0xe) |
13491 ((dc->condexec_mask >> 4) & 1));
13492 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
13493 if (dc->condexec_mask == 0) {
13494 dc->condexec_cond = 0;
13495 }
13496 }
13497
d0264d86
RH
13498 arm_post_translate_insn(dc);
13499
13500 /* Thumb is a variable-length ISA. Stop translation when the next insn
13501 * will touch a new page. This ensures that prefetch aborts occur at
13502 * the right place.
13503 *
13504 * We want to stop the TB if the next insn starts in a new page,
13505 * or if it spans between this page and the next. This means that
13506 * if we're looking at the last halfword in the page we need to
13507 * see if it's a 16-bit Thumb insn (which will fit in this TB)
13508 * or a 32-bit Thumb insn (which won't).
13509 * This is to avoid generating a silly TB with a single 16-bit insn
13510 * in it at the end of this page (which would execute correctly
13511 * but isn't very efficient).
13512 */
13513 if (dc->base.is_jmp == DISAS_NEXT
bfe7ad5b
EC
13514 && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
13515 || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
d0264d86
RH
13516 && insn_crosses_page(env, dc)))) {
13517 dc->base.is_jmp = DISAS_TOO_MANY;
13518 }
722ef0a5
RH
13519}
13520
70d3c035 13521static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
1d8a5535 13522{
70d3c035 13523 DisasContext *dc = container_of(dcbase, DisasContext, base);
2e70f6ef 13524
c5a49c63 13525 if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
70d3c035
LV
13526 /* FIXME: This can theoretically happen with self-modifying code. */
13527 cpu_abort(cpu, "IO on conditional branch instruction");
2e70f6ef 13528 }
9ee6e8bb 13529
b5ff1b31 13530 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
13531 instruction was a conditional branch or trap, and the PC has
13532 already been written. */
f021b2c4 13533 gen_set_condexec(dc);
dcba3a8d 13534 if (dc->base.is_jmp == DISAS_BX_EXCRET) {
3bb8a96f
PM
13535 /* Exception return branches need some special case code at the
13536 * end of the TB, which is complex enough that it has to
13537 * handle the single-step vs not and the condition-failed
13538 * insn codepath itself.
13539 */
13540 gen_bx_excret_final_code(dc);
13541 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 13542 /* Unconditional and "condition passed" instruction codepath. */
dcba3a8d 13543 switch (dc->base.is_jmp) {
7999a5c8 13544 case DISAS_SWI:
50225ad0 13545 gen_ss_advance(dc);
73710361
GB
13546 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13547 default_exception_el(dc));
7999a5c8
SF
13548 break;
13549 case DISAS_HVC:
37e6456e 13550 gen_ss_advance(dc);
73710361 13551 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
13552 break;
13553 case DISAS_SMC:
37e6456e 13554 gen_ss_advance(dc);
73710361 13555 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
13556 break;
13557 case DISAS_NEXT:
a68956ad 13558 case DISAS_TOO_MANY:
7999a5c8
SF
13559 case DISAS_UPDATE:
13560 gen_set_pc_im(dc, dc->pc);
13561 /* fall through */
13562 default:
5425415e
PM
13563 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
13564 gen_singlestep_exception(dc);
a0c231e6
RH
13565 break;
13566 case DISAS_NORETURN:
13567 break;
7999a5c8 13568 }
8aaca4c0 13569 } else {
9ee6e8bb
PB
13570 /* While branches must always occur at the end of an IT block,
13571 there are a few other things that can cause us to terminate
65626741 13572 the TB in the middle of an IT block:
9ee6e8bb
PB
13573 - Exception generating instructions (bkpt, swi, undefined).
13574 - Page boundaries.
13575 - Hardware watchpoints.
13576 Hardware breakpoints have already been handled and skip this code.
13577 */
dcba3a8d 13578 switch (dc->base.is_jmp) {
8aaca4c0 13579 case DISAS_NEXT:
a68956ad 13580 case DISAS_TOO_MANY:
6e256c93 13581 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 13582 break;
577bf808 13583 case DISAS_JUMP:
8a6b28c7
EC
13584 gen_goto_ptr();
13585 break;
e8d52302
AB
13586 case DISAS_UPDATE:
13587 gen_set_pc_im(dc, dc->pc);
13588 /* fall through */
577bf808 13589 default:
8aaca4c0 13590 /* indicate that the hash table must be used to find the next TB */
07ea28b4 13591 tcg_gen_exit_tb(NULL, 0);
8aaca4c0 13592 break;
a0c231e6 13593 case DISAS_NORETURN:
8aaca4c0
FB
13594 /* nothing more to generate */
13595 break;
9ee6e8bb 13596 case DISAS_WFI:
58803318
SS
13597 {
13598 TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
13599 !(dc->insn & (1U << 31))) ? 2 : 4);
13600
13601 gen_helper_wfi(cpu_env, tmp);
13602 tcg_temp_free_i32(tmp);
84549b6d
PM
13603 /* The helper doesn't necessarily throw an exception, but we
13604 * must go back to the main loop to check for interrupts anyway.
13605 */
07ea28b4 13606 tcg_gen_exit_tb(NULL, 0);
9ee6e8bb 13607 break;
58803318 13608 }
72c1d3af
PM
13609 case DISAS_WFE:
13610 gen_helper_wfe(cpu_env);
13611 break;
c87e5a61
PM
13612 case DISAS_YIELD:
13613 gen_helper_yield(cpu_env);
13614 break;
9ee6e8bb 13615 case DISAS_SWI:
73710361
GB
13616 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
13617 default_exception_el(dc));
9ee6e8bb 13618 break;
37e6456e 13619 case DISAS_HVC:
73710361 13620 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
13621 break;
13622 case DISAS_SMC:
73710361 13623 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 13624 break;
8aaca4c0 13625 }
f021b2c4
PM
13626 }
13627
13628 if (dc->condjmp) {
13629 /* "Condition failed" instruction codepath for the branch/trap insn */
13630 gen_set_label(dc->condlabel);
13631 gen_set_condexec(dc);
b636649f 13632 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
13633 gen_set_pc_im(dc, dc->pc);
13634 gen_singlestep_exception(dc);
13635 } else {
6e256c93 13636 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 13637 }
2c0262af 13638 }
23169224
LV
13639
13640 /* Functions above can change dc->pc, so re-align db->pc_next */
13641 dc->base.pc_next = dc->pc;
70d3c035
LV
13642}
13643
4013f7fc
LV
13644static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
13645{
13646 DisasContext *dc = container_of(dcbase, DisasContext, base);
13647
13648 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 13649 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
13650}
13651
23169224
LV
13652static const TranslatorOps arm_translator_ops = {
13653 .init_disas_context = arm_tr_init_disas_context,
13654 .tb_start = arm_tr_tb_start,
13655 .insn_start = arm_tr_insn_start,
13656 .breakpoint_check = arm_tr_breakpoint_check,
13657 .translate_insn = arm_tr_translate_insn,
13658 .tb_stop = arm_tr_tb_stop,
13659 .disas_log = arm_tr_disas_log,
13660};
13661
722ef0a5
RH
13662static const TranslatorOps thumb_translator_ops = {
13663 .init_disas_context = arm_tr_init_disas_context,
13664 .tb_start = arm_tr_tb_start,
13665 .insn_start = arm_tr_insn_start,
13666 .breakpoint_check = arm_tr_breakpoint_check,
13667 .translate_insn = thumb_tr_translate_insn,
13668 .tb_stop = arm_tr_tb_stop,
13669 .disas_log = arm_tr_disas_log,
13670};
13671
70d3c035 13672/* generate intermediate code for basic block 'tb'. */
23169224 13673void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 13674{
23169224
LV
13675 DisasContext dc;
13676 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 13677
aad821ac 13678 if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
722ef0a5
RH
13679 ops = &thumb_translator_ops;
13680 }
23169224 13681#ifdef TARGET_AARCH64
aad821ac 13682 if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
23169224 13683 ops = &aarch64_translator_ops;
2c0262af
FB
13684 }
13685#endif
23169224
LV
13686
13687 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
13688}
13689
878096ee
AF
13690void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
13691 int flags)
2c0262af 13692{
878096ee
AF
13693 ARMCPU *cpu = ARM_CPU(cs);
13694 CPUARMState *env = &cpu->env;
2c0262af
FB
13695 int i;
13696
17731115
PM
13697 if (is_a64(env)) {
13698 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
13699 return;
13700 }
13701
2c0262af 13702 for (i = 0; i < 16; i++) {
7fe48483 13703 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 13704 if ((i % 4) == 3)
7fe48483 13705 cpu_fprintf(f, "\n");
2c0262af 13706 else
7fe48483 13707 cpu_fprintf(f, " ");
2c0262af 13708 }
06e5cf7a 13709
5b906f35
PM
13710 if (arm_feature(env, ARM_FEATURE_M)) {
13711 uint32_t xpsr = xpsr_read(env);
13712 const char *mode;
1e577cc7
PM
13713 const char *ns_status = "";
13714
13715 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
13716 ns_status = env->v7m.secure ? "S " : "NS ";
13717 }
5b906f35
PM
13718
13719 if (xpsr & XPSR_EXCP) {
13720 mode = "handler";
13721 } else {
8bfc26ea 13722 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
13723 mode = "unpriv-thread";
13724 } else {
13725 mode = "priv-thread";
13726 }
13727 }
13728
1e577cc7 13729 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
5b906f35
PM
13730 xpsr,
13731 xpsr & XPSR_N ? 'N' : '-',
13732 xpsr & XPSR_Z ? 'Z' : '-',
13733 xpsr & XPSR_C ? 'C' : '-',
13734 xpsr & XPSR_V ? 'V' : '-',
13735 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 13736 ns_status,
5b906f35 13737 mode);
06e5cf7a 13738 } else {
5b906f35
PM
13739 uint32_t psr = cpsr_read(env);
13740 const char *ns_status = "";
13741
13742 if (arm_feature(env, ARM_FEATURE_EL3) &&
13743 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
13744 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
13745 }
13746
13747 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
13748 psr,
13749 psr & CPSR_N ? 'N' : '-',
13750 psr & CPSR_Z ? 'Z' : '-',
13751 psr & CPSR_C ? 'C' : '-',
13752 psr & CPSR_V ? 'V' : '-',
13753 psr & CPSR_T ? 'T' : 'A',
13754 ns_status,
81e37284 13755 aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
5b906f35 13756 }
b7bcbe95 13757
f2617cfc
PM
13758 if (flags & CPU_DUMP_FPU) {
13759 int numvfpregs = 0;
13760 if (arm_feature(env, ARM_FEATURE_VFP)) {
13761 numvfpregs += 16;
13762 }
13763 if (arm_feature(env, ARM_FEATURE_VFP3)) {
13764 numvfpregs += 16;
13765 }
13766 for (i = 0; i < numvfpregs; i++) {
9a2b5256 13767 uint64_t v = *aa32_vfp_dreg(env, i);
f2617cfc
PM
13768 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
13769 i * 2, (uint32_t)v,
13770 i * 2 + 1, (uint32_t)(v >> 32),
13771 i, v);
13772 }
ec527e4e 13773 cpu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
b7bcbe95 13774 }
2c0262af 13775}
a6b025d3 13776
bad729e2
RH
13777void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
13778 target_ulong *data)
d2856f1a 13779{
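    /* data[] layout matches tcg_gen_insn_start() in arm_tr_insn_start():
     * data[0] = PC, data[1] = condexec bits, data[2] = syndrome word 2 */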
3926cc84 13780 if (is_a64(env)) {
bad729e2 13781 env->pc = data[0];
40f860cd 13782 env->condexec_bits = 0;
aaa1f954 13783 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13784 } else {
bad729e2
RH
13785 env->regs[15] = data[0];
13786 env->condexec_bits = data[1];
aaa1f954 13787 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 13788 }
d2856f1a 13789}