/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "exec/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers.  */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
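
/* Worked example (illustrative values, not tied to one callsite): for a
 * 16-bit Thumb "LDRB r3, [r0, r1]" a caller would pass memop = MO_UB and
 * issinfo = 3 | ISSIs16Bit, which records sas = 0 (byte), sse = false,
 * srt = 3 and is_16bit = true in the syndrome for a possible data abort.
 */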

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        g_assert_not_reached();
    }
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
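
/* Descriptive note: on return, a holds the signed product of the two low
 * halfwords and b the signed product of the two high halfwords, which is
 * the building block the dual-multiply (SMUAD/SMUSD-style) insns combine.
 */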

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
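
/* Worked example: var = 0x11223344 gives tmp = (var >> 8) & 0x00ff00ff
 * = 0x00110033 and var = (var & 0x00ff00ff) << 8 = 0x22004400; OR-ing
 * the two yields 0x22114433, i.e. each halfword byteswapped.
 */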

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
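
/* Descriptive note: tcg_gen_mulu2_i32/muls2_i32 produce the full 64-bit
 * product as two 32-bit halves, avoiding a 64-bit multiply on 32-bit
 * hosts; the halves are then concatenated into a single i64.
 */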

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
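
/* Worked example: t0 = t1 = 0x00008001. tmp = (t0 ^ t1) & 0x8000 = 0;
 * clearing bit 15 of both and adding gives 0x0001 + 0x0001 = 0x0002, and
 * the final XOR with tmp re-creates bit 15 of the halfword sum. The
 * result 0x00000002 keeps the high halfword at 0, whereas a plain 32-bit
 * add (0x8001 + 0x8001 = 0x10002) would let the carry corrupt it.
 */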

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
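
/* Descriptive note: the add2 path computes {CF:NF} = t0 + CF and then
 * {CF:NF} += t1 as double-word sums of 32-bit halves, while the fallback
 * widens everything to i64 and splits the sum with extr; either way NF
 * ends up holding the 32-bit result and CF the carry-out. The overflow
 * rule used above is VF = (result ^ t0) & ~(t0 ^ t1): overflow iff the
 * operands had the same sign and the result's sign differs.
 */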

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
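
/* Descriptive note: this relies on the two's-complement identity
 * t0 - t1 - !CF == t0 + ~t1 + CF, so subtract-with-carry can reuse
 * gen_adc_CC and inherit its flag computation unchanged.
 */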

#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
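
/* Descriptive note: the movcond implements the ARM rule that a
 * register-specified shift uses only the bottom byte of the amount and
 * that amounts of 32..255 produce 0 for LSL/LSR: when (t1 & 0xff) > 0x1f
 * the source operand is replaced by zero before the host shift by
 * (t1 & 0x1f) is emitted, keeping the TCG shift count in range.
 */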

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
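
/* Descriptive note: gen_sar clamps the amount to 31 instead of zeroing,
 * since an arithmetic right shift by 32 or more is equivalent to one by
 * 31. tcg_gen_abs_i32 computes dest = (src > 0) ? src : -src; INT32_MIN
 * maps to itself, as its negation wraps in two's complement.
 */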

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
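
/* Descriptive note: sets CF to bit <shift> of var, i.e. the last bit
 * shifted out by the enclosing immediate shift; the shift == 31 case
 * skips the final mask because the shift itself already isolates the
 * top bit.
 */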

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
            break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
}

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and AArch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
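
/* Illustrative usage sketch (mirrors the conditional-execution pattern
 * used later in this file): to skip an insn when its condition fails,
 * a caller inverts the condition and branches over the body:
 *     TCGLabel *skip = gen_new_label();
 *     arm_gen_test_cc(cc ^ 1, skip);    // the LSB toggles the sense
 *     ... emit the conditional body ...
 *     gen_set_label(skip);
 */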

static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
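
/* Descriptive note: indexed by the 4-bit data-processing opcode; a 1
 * marks the logical ops whose S-bit forms set only N and Z from the
 * result (plus the shifter carry-out), while the arithmetic ops compute
 * full NZCV.
 */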

static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
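
/* Worked example: with SCTLR.B set (BE32, handled here only for
 * system-mode builds), a byte access XORs the address with 4 - 1 = 3 and
 * a halfword access with 4 - 2 = 2, so a byte load at 0x1000 really
 * accesses 0x1003; word-sized and larger accesses are left unmodified.
 */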

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
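
/* Descriptive note: each instantiation expands to a pair of helpers,
 * e.g. gen_aa32_ld16u() and gen_aa32_ld16u_iss(); the plain form emits
 * the access with the current target endianness folded in
 * (OPC | s->be_data), while the _iss form additionally records the
 * data-abort syndrome information via disas_set_da_iss().
 */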

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name)                                  \
static inline void gen_vfp_##name(int dp, int neon)         \
{                                                           \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);            \
    if (dp) {                                               \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else {                                                \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                       \
    tcg_temp_free_ptr(statusptr);                           \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name)                                  \
static inline void gen_vfp_##name(int dp, int neon)         \
{                                                           \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);            \
    if (dp) {                                               \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else {                                                \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    }                                                       \
    tcg_temp_free_ptr(statusptr);                           \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name, round)                                \
static inline void gen_vfp_##name(int dp, int shift, int neon)  \
{                                                               \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                  \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                \
    if (dp) {                                                   \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr);             \
    } else {                                                    \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr);             \
    }                                                           \
    tcg_temp_free_i32(tmp_shift);                               \
    tcg_temp_free_ptr(statusptr);                               \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

static inline long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
    } else {
        long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
        if (reg & 1) {
            ofs += offsetof(CPU_DoubleU, l.upper);
        } else {
            ofs += offsetof(CPU_DoubleU, l.lower);
        }
        return ofs;
    }
}
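
/* Worked example: the VFP registers are views of the zregs storage, two
 * doubles per vector element and two singles per double. For reg = 5
 * with dp == false, this resolves to zregs[1].d[0] plus the CPU_DoubleU
 * offset of the most significant 32 bits: s5 is the upper half of d2,
 * as the architecture defines.
 */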

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static inline long
neon_element_offset(int reg, int element, TCGMemOp size)
{
    int element_size = 1 << size;
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /* Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_reg_offset(reg, 0) + ofs;
}
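
/* Worked example (big-endian host): for 16-bit elements,
 * element_size == 2, so the offset is XORed with 6; element 0 then maps
 * to byte offset 6 inside its 8-byte unit, which is where a big-endian
 * host keeps the least significant halfword of the 64-bit double-word.
 */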

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i32(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
{
    long offset = neon_element_offset(reg, ele, mop & MO_SIZE);

    switch (mop) {
    case MO_UB:
        tcg_gen_ld8u_i64(var, cpu_env, offset);
        break;
    case MO_UW:
        tcg_gen_ld16u_i64(var, cpu_env, offset);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(var, cpu_env, offset);
        break;
    case MO_Q:
        tcg_gen_ld_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i32(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i32(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st_i32(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
{
    long offset = neon_element_offset(reg, ele, size);

    switch (size) {
    case MO_8:
        tcg_gen_st8_i64(var, cpu_env, offset);
        break;
    case MO_16:
        tcg_gen_st16_i64(var, cpu_env, offset);
        break;
    case MO_32:
        tcg_gen_st32_i64(var, cpu_env, offset);
        break;
    case MO_64:
        tcg_gen_st_i64(var, cpu_env, offset);
        break;
    default:
        g_assert_not_reached();
    }
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)
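
/* Bit 20 of a coprocessor instruction is the L (load) bit, so this flag
 * distinguishes the register-read transfers (TMRC, TMRRC, WLDR*) from the
 * register-write transfers (TMCR, TMCRR, WSTR*) decoded below.
 */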

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
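
/* The three macro flavours above differ only in the helper signature:
 * IWMMXT_OP wraps a pure two-operand helper, while the _ENV variants also
 * pass cpu_env for helpers that need to touch CPU state (e.g. to record
 * saturation in the wCSSF control register).
 */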

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
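
/* In wCon, bit 1 (MUP) marks an update of the main wRn register file and
 * bit 0 (CUP) an update of the control registers; the decoder calls these
 * two helpers after each write it emits.
 */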

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
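
/* The addressing-mode decode above follows the usual coprocessor
 * load/store layout: bit 24 = P (pre/post-index), bit 23 = U (add or
 * subtract the offset), bit 21 = W (base writeback); the P=0, W=0, U=0
 * combination is rejected as invalid.
 */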

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {             /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {              /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WLDRD */
                        gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
                        i = 0;
                    } else {                        /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {         /* WLDRH */
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                    } else {                        /* WLDRB */
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {              /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {         /* WSTRD */
                        gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
                    } else {                        /* WSTRW wRd */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    }
                } else {
                    if (insn & (1 << 22)) {         /* WSTRH */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                    } else {                        /* WSTRB */
                        tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                     /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                     /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                     /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                     /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                     /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                     /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                         /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                   /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                   /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:     /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                   /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                   /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                   /* MIABB */
        case 0xd:                                   /* MIABT */
        case 0xe:                                   /* MIATB */
        case 0xf:                                   /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                 /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                    /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
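
/* With VFP3 and later a double-precision register number is five bits; the
 * extra (high) bit lives in a separate field of the instruction word, which
 * the big/small bit shuffling above reassembles.  Without VFP3, a set high
 * bit makes the encoding invalid.
 */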

/* Move between integer and VFP cores.  */
static TCGv_i32 gen_vfp_mrs(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, cpu_F0s);
    return tmp;
}

static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
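
/* The two helpers above replicate the low (resp. high) half-word of var
 * into both 16-bit halves of the 32-bit value; they are used when a 16-bit
 * scalar has to be duplicated across the lanes of a Neon register.
 */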

static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
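
/* The VSEL cases above map the 2-bit cond field onto movcond tests against
 * the cached flag values: eq tests ZF == 0 (cpu_ZF holds the value whose
 * zeroness is the Z flag), vs tests the sign bit of VF, ge tests
 * (VF ^ NF) >= 0, and gt chains the ne and ge tests.  When the condition
 * fails, movcond selects frm, which provides the inverted-condition
 * behaviour for free.
 */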

static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
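
/* The minnum/maxnum helpers implement the IEEE 754-2008 minNum/maxNum
 * operations required by VMINNM/VMAXNM: when exactly one operand is a
 * quiet NaN, the other operand is returned instead of the NaN.
 */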

static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
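
/* gen_helper_set_rmode stores the requested rounding mode into the fp
 * status and returns the previous mode in tcg_rmode, so calling it once
 * before and once after the rint operation installs the new mode and then
 * restores the old one.
 */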
3290
c9975a83
WN
3291static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3292 int rounding)
3293{
3294 bool is_signed = extract32(insn, 7, 1);
3295 TCGv_ptr fpst = get_fpstatus_ptr(0);
3296 TCGv_i32 tcg_rmode, tcg_shift;
3297
3298 tcg_shift = tcg_const_i32(0);
3299
3300 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
9b049916 3301 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3302
3303 if (dp) {
3304 TCGv_i64 tcg_double, tcg_res;
3305 TCGv_i32 tcg_tmp;
3306 /* Rd is encoded as a single precision register even when the source
3307 * is double precision.
3308 */
3309 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3310 tcg_double = tcg_temp_new_i64();
3311 tcg_res = tcg_temp_new_i64();
3312 tcg_tmp = tcg_temp_new_i32();
3313 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3314 if (is_signed) {
3315 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3316 } else {
3317 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3318 }
ecc7b3aa 3319 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3320 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3321 tcg_temp_free_i32(tcg_tmp);
3322 tcg_temp_free_i64(tcg_res);
3323 tcg_temp_free_i64(tcg_double);
3324 } else {
3325 TCGv_i32 tcg_single, tcg_res;
3326 tcg_single = tcg_temp_new_i32();
3327 tcg_res = tcg_temp_new_i32();
3328 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3329 if (is_signed) {
3330 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3331 } else {
3332 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3333 }
3334 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3335 tcg_temp_free_i32(tcg_res);
3336 tcg_temp_free_i32(tcg_single);
3337 }
3338
9b049916 3339 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
c9975a83
WN
3340 tcg_temp_free_i32(tcg_rmode);
3341
3342 tcg_temp_free_i32(tcg_shift);
3343
3344 tcg_temp_free_ptr(fpst);
3345
3346 return 0;
3347}
7655f39b
WN
3348
3349/* Table for converting the most common AArch32 encoding of
3350 * rounding mode to arm_fprounding order (which matches the
3351 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3352 */
3353static const uint8_t fp_decode_rm[] = {
3354 FPROUNDING_TIEAWAY,
3355 FPROUNDING_TIEEVEN,
3356 FPROUNDING_POSINF,
3357 FPROUNDING_NEGINF,
3358};
3359
7dcc1f89 3360static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3361{
3362 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3363
d614a513 3364 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3365 return 1;
3366 }
3367
3368 if (dp) {
3369 VFP_DREG_D(rd, insn);
3370 VFP_DREG_N(rn, insn);
3371 VFP_DREG_M(rm, insn);
3372 } else {
3373 rd = VFP_SREG_D(insn);
3374 rn = VFP_SREG_N(insn);
3375 rm = VFP_SREG_M(insn);
3376 }
3377
3378 if ((insn & 0x0f800e50) == 0x0e000a00) {
3379 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3380 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3381 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3382 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3383 /* VRINTA, VRINTN, VRINTP, VRINTM */
3384 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3385 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3386 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3387 /* VCVTA, VCVTN, VCVTP, VCVTM */
3388 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3389 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3390 }
3391 return 1;
3392}
3393
a1c7273b 3394/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3395 (ie. an undefined instruction). */
7dcc1f89 3396static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3397{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        int vec_size = pass ? 16 : 8;
                        tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
                                             vec_size, vec_size, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FPSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
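            /*
             * Worked example (annotation, not original code): VADD has
             * insn bit 23 = 0, bits 21:20 = 0b11 and bit 6 = 0, so
             * op = 0 | 6 | 0 = 6, landing in "case 6: add" below.
             */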
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has the same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct: an input NaN should come out with its sign
                     * bit flipped if it is a negated input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
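                    /*
                     * Worked example (annotation, not original code): for
                     * the single-precision immediate 0x70, i starts as 0x70;
                     * the (i & 0x40) branch ORs in 0x780 giving 0x7f0, and
                     * 0x7f0 << 19 == 0x3f800000, i.e. 1.0f.
                     */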
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp_mode = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp_mode);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp_mode);
                        }
                        tcg_temp_free_i32(ahp_mode);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(tmp);
                        break;
                    }
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();

                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(false);
                        TCGv_i32 ahp = get_ahp_flag();
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           fpst, ahp);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           fpst, ahp);
                        }
                        tcg_temp_free_i32(ahp);
                        tcg_temp_free_ptr(fpst);
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    }
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Set up the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (s->v8m_stackcheck && rn == 13 && w) {
                    /*
                     * Here 'addr' is the lowest address we will store to,
                     * and is either the old SP (if post-increment) or
                     * the new SP (if pre-decrement). For post-increment
                     * where the old value is below the limit and the new
                     * value is above, it is UNKNOWN whether the limit check
                     * triggers; we choose to trigger.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}

static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
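
/*
 * Usage note (annotation, not original code): the slot index n passed to
 * gen_goto_tb() selects one of the two patchable direct-jump slots a TB
 * may have, so a conditionally executed instruction can chain its taken
 * path (e.g. slot 0, via gen_jmp() below) and its fall-through path
 * (e.g. slot 1, emitted when the TB ends) to different successor TBs.
 */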

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}

static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
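
/*
 * Annotation (not original code): this is the 16 x 16 -> 32 signed
 * multiply behind SMULxy and friends. E.g. for SMULBT, x = 0 and y = 1:
 * t0 is sign-extended from its bottom half while t1 is arithmetically
 * shifted down to its top half before the 32-bit multiply.
 */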

/* Return the mask of PSR bits set by an MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;
    if (flags & (1 << 1))
        mask |= 0xff00;
    if (flags & (1 << 2))
        mask |= 0xff0000;
    if (flags & (1 << 3))
        mask |= 0xff000000;

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
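
/*
 * Worked example (annotation, not original code): "MSR CPSR_f, Rm" sets
 * only bit 3 of the flags field, so the raw mask is 0xff000000; the
 * reserved and execution-state bits are then cleared, and in user mode
 * the mask is further restricted to CPSR_USER.
 */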

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}

static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}

/* Store value to PC as for an exception return (i.e. don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single-threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions,
 * which are *one* of many ways to wake the CPU from WFE, are not
 * implemented, so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
    default: /* nop */
        break;
    }
}

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}

static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
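
/*
 * Expansion example (annotation, not original code): with size == 1 and
 * u == 0 the selector (size << 1) | u is 2, so GEN_NEON_INTEGER_OP(hadd)
 * emits a call to gen_helper_neon_hadd_s16(tmp, tmp, tmp2).
 */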

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
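
/*
 * Worked example (annotation, not original code): with t0 = 0x44332211
 * and t1 = 0x88776655 the sequence above yields t0 = 0x33771155 and
 * t1 = 0x44882266, i.e. each 2x2 block of bytes drawn from the two
 * inputs has been transposed, which is what VTRN.8 needs.
 */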

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}


static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1}
};
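
/*
 * Reading the table (annotation, not original code): indexed by the op
 * field of a "load/store all elements" insn, each entry gives how many
 * de-interleaved groups to transfer (nregs), how many registers are
 * cycled per element (interleave), and their register spacing. E.g.
 * entry 2, {4, 1, 1}, is VLD1/VST1 with four registers and no
 * interleaving, while entry 0, {1, 4, 1}, is VLD4/VST4 cycling four
 * consecutive D registers for each structure.
 */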
4982
4983/* Translate a NEON load/store element instruction. Return nonzero if the
4984 instruction is invalid. */
7dcc1f89 4985static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4986{
4987 int rd, rn, rm;
4988 int op;
4989 int nregs;
4990 int interleave;
84496233 4991 int spacing;
9ee6e8bb
PB
4992 int stride;
4993 int size;
4994 int reg;
9ee6e8bb 4995 int load;
9ee6e8bb 4996 int n;
7377c2c9 4997 int vec_size;
ac55d007
RH
4998 int mmu_idx;
4999 TCGMemOp endian;
39d5492a
PM
5000 TCGv_i32 addr;
5001 TCGv_i32 tmp;
5002 TCGv_i32 tmp2;
84496233 5003 TCGv_i64 tmp64;
9ee6e8bb 5004
2c7ffc41
PM
5005 /* FIXME: this access check should not take precedence over UNDEF
5006 * for invalid encodings; we will generate incorrect syndrome information
5007 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5008 */
9dbbc748 5009 if (s->fp_excp_el) {
2c7ffc41 5010 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 5011 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5012 return 0;
5013 }
5014
5df8bac1 5015 if (!s->vfp_enabled)
9ee6e8bb
PB
5016 return 1;
5017 VFP_DREG_D(rd, insn);
5018 rn = (insn >> 16) & 0xf;
5019 rm = insn & 0xf;
5020 load = (insn & (1 << 21)) != 0;
ac55d007
RH
5021 endian = s->be_data;
5022 mmu_idx = get_mem_index(s);
9ee6e8bb
PB
5023 if ((insn & (1 << 23)) == 0) {
5024 /* Load store all elements. */
5025 op = (insn >> 8) & 0xf;
5026 size = (insn >> 6) & 3;
84496233 5027 if (op > 10)
9ee6e8bb 5028 return 1;
f2dd89d0
PM
5029 /* Catch UNDEF cases for bad values of align field */
5030 switch (op & 0xc) {
5031 case 4:
5032 if (((insn >> 5) & 1) == 1) {
5033 return 1;
5034 }
5035 break;
5036 case 8:
5037 if (((insn >> 4) & 3) == 3) {
5038 return 1;
5039 }
5040 break;
5041 default:
5042 break;
5043 }
9ee6e8bb
PB
5044 nregs = neon_ls_element_type[op].nregs;
5045 interleave = neon_ls_element_type[op].interleave;
84496233 5046 spacing = neon_ls_element_type[op].spacing;
ac55d007 5047 if (size == 3 && (interleave | spacing) != 1) {
84496233 5048 return 1;
ac55d007 5049 }
e23f12b3
RH
5050 /* For our purposes, bytes are always little-endian. */
5051 if (size == 0) {
5052 endian = MO_LE;
5053 }
5054 /* Consecutive little-endian elements from a single register
5055 * can be promoted to a larger little-endian operation.
5056 */
5057 if (interleave == 1 && endian == MO_LE) {
5058 size = 3;
5059 }
ac55d007 5060 tmp64 = tcg_temp_new_i64();
e318a60b 5061 addr = tcg_temp_new_i32();
ac55d007 5062 tmp2 = tcg_const_i32(1 << size);
dcc65026 5063 load_reg_var(s, addr, rn);
9ee6e8bb 5064 for (reg = 0; reg < nregs; reg++) {
ac55d007
RH
5065 for (n = 0; n < 8 >> size; n++) {
5066 int xs;
5067 for (xs = 0; xs < interleave; xs++) {
5068 int tt = rd + reg + spacing * xs;
5069
5070 if (load) {
5071 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
5072 neon_store_element64(tt, n, size, tmp64);
5073 } else {
5074 neon_load_element64(tmp64, tt, n, size);
5075 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
9ee6e8bb 5076 }
ac55d007 5077 tcg_gen_add_i32(addr, addr, tmp2);
9ee6e8bb
PB
5078 }
5079 }
9ee6e8bb 5080 }
e318a60b 5081 tcg_temp_free_i32(addr);
ac55d007
RH
5082 tcg_temp_free_i32(tmp2);
5083 tcg_temp_free_i64(tmp64);
5084 stride = nregs * interleave * 8;
9ee6e8bb
PB
5085 } else {
5086 size = (insn >> 10) & 3;
5087 if (size == 3) {
5088 /* Load single element to all lanes. */
8e18cde3
PM
5089 int a = (insn >> 4) & 1;
5090 if (!load) {
9ee6e8bb 5091 return 1;
8e18cde3 5092 }
9ee6e8bb
PB
5093 size = (insn >> 6) & 3;
5094 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
5095
5096 if (size == 3) {
5097 if (nregs != 4 || a == 0) {
9ee6e8bb 5098 return 1;
99c475ab 5099 }
8e18cde3
PM
5100 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5101 size = 2;
5102 }
5103 if (nregs == 1 && a == 1 && size == 0) {
5104 return 1;
5105 }
5106 if (nregs == 3 && a == 1) {
5107 return 1;
5108 }
e318a60b 5109 addr = tcg_temp_new_i32();
8e18cde3 5110 load_reg_var(s, addr, rn);
7377c2c9
RH
5111
5112 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
5113 * VLD2/3/4 to all lanes: bit 5 indicates register stride.
5114 */
5115 stride = (insn & (1 << 5)) ? 2 : 1;
5116 vec_size = nregs == 1 ? stride * 8 : 8;
5117
5118 tmp = tcg_temp_new_i32();
5119 for (reg = 0; reg < nregs; reg++) {
5120 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5121 s->be_data | size);
5122 if ((rd & 1) && vec_size == 16) {
5123 /* We cannot write 16 bytes at once because the
5124 * destination is unaligned.
5125 */
5126 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5127 8, 8, tmp);
5128 tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
5129 neon_reg_offset(rd, 0), 8, 8);
5130 } else {
5131 tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
5132 vec_size, vec_size, tmp);
8e18cde3 5133 }
7377c2c9
RH
5134 tcg_gen_addi_i32(addr, addr, 1 << size);
5135 rd += stride;
9ee6e8bb 5136 }
7377c2c9 5137 tcg_temp_free_i32(tmp);
e318a60b 5138 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5139 stride = (1 << size) * nregs;
5140 } else {
5141 /* Single element. */
93262b16 5142 int idx = (insn >> 4) & 0xf;
2d6ac920 5143 int reg_idx;
9ee6e8bb
PB
5144 switch (size) {
5145 case 0:
2d6ac920 5146 reg_idx = (insn >> 5) & 7;
9ee6e8bb
PB
5147 stride = 1;
5148 break;
5149 case 1:
2d6ac920 5150 reg_idx = (insn >> 6) & 3;
9ee6e8bb
PB
5151 stride = (insn & (1 << 5)) ? 2 : 1;
5152 break;
5153 case 2:
2d6ac920 5154 reg_idx = (insn >> 7) & 1;
9ee6e8bb
PB
5155 stride = (insn & (1 << 6)) ? 2 : 1;
5156 break;
5157 default:
5158 abort();
5159 }
5160 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5161 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5162 switch (nregs) {
5163 case 1:
5164 if (((idx & (1 << size)) != 0) ||
5165 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5166 return 1;
5167 }
5168 break;
5169 case 3:
5170 if ((idx & 1) != 0) {
5171 return 1;
5172 }
5173 /* fall through */
5174 case 2:
5175 if (size == 2 && (idx & 2) != 0) {
5176 return 1;
5177 }
5178 break;
5179 case 4:
5180 if ((size == 2) && ((idx & 3) == 3)) {
5181 return 1;
5182 }
5183 break;
5184 default:
5185 abort();
5186 }
5187 if ((rd + stride * (nregs - 1)) > 31) {
5188 /* Attempts to write off the end of the register file
5189 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5190 * the neon_load_reg() would write off the end of the array.
5191 */
5192 return 1;
5193 }
2d6ac920 5194 tmp = tcg_temp_new_i32();
e318a60b 5195 addr = tcg_temp_new_i32();
dcc65026 5196 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5197 for (reg = 0; reg < nregs; reg++) {
5198 if (load) {
2d6ac920
RH
5199 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
5200 s->be_data | size);
5201 neon_store_element(rd, reg_idx, size, tmp);
9ee6e8bb 5202 } else { /* Store */
2d6ac920
RH
5203 neon_load_element(tmp, rd, reg_idx, size);
5204 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
5205 s->be_data | size);
99c475ab 5206 }
9ee6e8bb 5207 rd += stride;
1b2b1e54 5208 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5209 }
e318a60b 5210 tcg_temp_free_i32(addr);
2d6ac920 5211 tcg_temp_free_i32(tmp);
9ee6e8bb 5212 stride = nregs * (1 << size);
99c475ab 5213 }
9ee6e8bb
PB
5214 }
5215 if (rm != 15) {
39d5492a 5216 TCGv_i32 base;
b26eefb6
PB
5217
5218 base = load_reg(s, rn);
9ee6e8bb 5219 if (rm == 13) {
b26eefb6 5220 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5221 } else {
39d5492a 5222 TCGv_i32 index;
b26eefb6
PB
5223 index = load_reg(s, rm);
5224 tcg_gen_add_i32(base, base, index);
7d1b0095 5225 tcg_temp_free_i32(index);
9ee6e8bb 5226 }
b26eefb6 5227 store_reg(s, rn, base);
9ee6e8bb
PB
5228 }
5229 return 0;
5230}
3b46e624 5231
39d5492a 5232static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5233{
5234 switch (size) {
5235 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5236 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5237 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5238 default: abort();
5239 }
5240}
5241
39d5492a 5242static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5243{
5244 switch (size) {
02da0b2d
PM
5245 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5246 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5247 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5248 default: abort();
5249 }
5250}
5251
39d5492a 5252static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5253{
5254 switch (size) {
02da0b2d
PM
5255 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5256 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5257 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5258 default: abort();
5259 }
5260}
5261
39d5492a 5262static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5263{
5264 switch (size) {
02da0b2d
PM
5265 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5266 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5267 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5268 default: abort();
5269 }
5270}
5271
39d5492a 5272static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5273 int q, int u)
5274{
5275 if (q) {
5276 if (u) {
5277 switch (size) {
5278 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5279 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5280 default: abort();
5281 }
5282 } else {
5283 switch (size) {
5284 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5285 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5286 default: abort();
5287 }
5288 }
5289 } else {
5290 if (u) {
5291 switch (size) {
b408a9b0
CL
5292 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5293 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5294 default: abort();
5295 }
5296 } else {
5297 switch (size) {
5298 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5299 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5300 default: abort();
5301 }
5302 }
5303 }
5304}
5305
39d5492a 5306static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5307{
5308 if (u) {
5309 switch (size) {
5310 case 0: gen_helper_neon_widen_u8(dest, src); break;
5311 case 1: gen_helper_neon_widen_u16(dest, src); break;
5312 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5313 default: abort();
5314 }
5315 } else {
5316 switch (size) {
5317 case 0: gen_helper_neon_widen_s8(dest, src); break;
5318 case 1: gen_helper_neon_widen_s16(dest, src); break;
5319 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5320 default: abort();
5321 }
5322 }
7d1b0095 5323 tcg_temp_free_i32(src);
ad69471c
PB
5324}
5325
5326static inline void gen_neon_addl(int size)
5327{
5328 switch (size) {
5329 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5330 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5331 case 2: tcg_gen_add_i64(CPU_V001); break;
5332 default: abort();
5333 }
5334}
5335
5336static inline void gen_neon_subl(int size)
5337{
5338 switch (size) {
5339 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5340 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5341 case 2: tcg_gen_sub_i64(CPU_V001); break;
5342 default: abort();
5343 }
5344}
5345
a7812ae4 5346static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5347{
5348 switch (size) {
5349 case 0: gen_helper_neon_negl_u16(var, var); break;
5350 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5351 case 2:
5352 tcg_gen_neg_i64(var, var);
5353 break;
ad69471c
PB
5354 default: abort();
5355 }
5356}
5357
a7812ae4 5358static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5359{
5360 switch (size) {
02da0b2d
PM
5361 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5362 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5363 default: abort();
5364 }
5365}
5366
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

static void gen_neon_narrow_op(int op, int u, int size,
                               TCGv_i32 dest, TCGv_i64 src)
{
    if (op) {
        if (u) {
            gen_neon_unarrow_sats(size, dest, src);
        } else {
            gen_neon_narrow(size, dest, src);
        }
    } else {
        if (u) {
            gen_neon_narrow_satu(size, dest, src);
        } else {
            gen_neon_narrow_sats(size, dest, src);
        }
    }
}

/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD_VQRDMLAH 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM_VQRDMLSH 25 /* VFMA, VFMS, VQRDMLSH */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD_VQRDMLAH] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM_VQRDMLSH] = 0x7, /* For VFM, size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};

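/* Each entry in neon_3r_sizes has bit n set if the insn accepts size
 * value n: e.g. 0x7 permits sizes 0-2 (8/16/32-bit elements) while 0xf
 * additionally permits size 3 (64-bit). The decoder below UNDEFs any
 * op/size pair whose bit is clear.
 */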
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float */
    return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
            (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
            op == NEON_2RM_VRINTM ||
            (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
            op >= NEON_2RM_VRECPE_F);
}

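/* The range tests above rely on the numeric ordering of the NEON_2RM_*
 * constants: [VRINTN, VRINTZ] covers ops 40-43, [VRINTP, VCVTMS] covers
 * ops 47-55, and op >= NEON_2RM_VRECPE_F (58) picks up the remaining
 * float ops through NEON_2RM_VCVT_UF (63).
 */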
static bool neon_2rm_is_v8_op(int op)
{
    /* Return true if this neon 2reg-misc op is ARMv8 and up */
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};

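/* The helper expander below gates the v8.1 RDM ops (VQRDMLAH/VQRDMLSH)
 * on the aa32_rdm feature: q selects a 64-bit (q == 0) or 128-bit
 * (q == 1) wide operation, hence the (1 + q) * 8 byte operand size
 * passed to tcg_gen_gvec_3_ptr().
 */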

/* Expand v8.1 simd helper. */
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                         int q, int rd, int rn, int rm)
{
    if (dc_isar_feature(aa32_rdm, s)) {
        int opr_sz = (1 + q) * 8;
        tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
                           vfp_reg_offset(1, rn),
                           vfp_reg_offset(1, rm), cpu_env,
                           opr_sz, opr_sz, 0, fn);
        return 0;
    }
    return 1;
}

/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);
    tcg_gen_and_i64(rn, rn, rd);
    tcg_gen_xor_i64(rd, rm, rn);
}
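
/* All three expanders use the bitwise-select identity
 *   (x & mask) | (y & ~mask) == ((x ^ y) & mask) ^ y
 * which needs only three ops; they differ in which of rd/rn/rm
 * supplies the mask and which value is written back.
 */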

static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_and_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);
    tcg_gen_andc_i64(rn, rn, rm);
    tcg_gen_xor_i64(rd, rd, rn);
}

static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);
    tcg_gen_and_vec(vece, rn, rn, rd);
    tcg_gen_xor_vec(vece, rd, rm, rn);
}

static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_and_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);
    tcg_gen_andc_vec(vece, rn, rn, rm);
    tcg_gen_xor_vec(vece, rd, rd, rn);
}

const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .load_dest = true
};

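/* Shift-right-and-accumulate expanders (SSRA/USRA): each lane computes
 * d += (a >> shift), arithmetic for the signed forms below and logical
 * for the unsigned forms that follow; the 8- and 16-bit variants work
 * on a whole 64-bit chunk at once via the vec_* sub-word helpers.
 */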
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i ssra_op[4] = {
    { .fni8 = gen_ssra8_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_8 },
    { .fni8 = gen_ssra16_i64,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_16 },
    { .fni4 = gen_ssra32_i32,
      .fniv = gen_ssra_vec,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_32 },
    { .fni8 = gen_ssra64_i64,
      .fniv = gen_ssra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_sari_vec,
      .vece = MO_64 },
};

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

const GVecGen2i usra_op[4] = {
    { .fni8 = gen_usra8_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8, },
    { .fni8 = gen_usra16_i64,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16, },
    { .fni4 = gen_usra32_i32,
      .fniv = gen_usra_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32, },
    { .fni8 = gen_usra64_i64,
      .fniv = gen_usra_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64, },
};

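/* Shift-and-insert expanders (VSRI/VSLI): the shifted source is merged
 * into the destination under a mask, so only the bit positions the
 * shift can write are replaced and the rest of each lane is preserved.
 * E.g. for an 8-bit VSRI by 3, mask = dup_const(MO_8, 0xff >> 3) keeps
 * the top three bits of every destination byte.
 */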
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
        tcg_gen_shri_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sri_op[4] = {
    { .fni8 = gen_shr8_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_8 },
    { .fni8 = gen_shr16_ins_i64,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_16 },
    { .fni4 = gen_shr32_ins_i32,
      .fniv = gen_shr_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_32 },
    { .fni8 = gen_shr64_ins_i64,
      .fniv = gen_shr_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shri_vec,
      .vece = MO_64 },
};

static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    if (sh == 0) {
        tcg_gen_mov_vec(d, a);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(d);
        TCGv_vec m = tcg_temp_new_vec_matching(d);

        tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
        tcg_gen_shli_vec(vece, t, a, sh);
        tcg_gen_and_vec(vece, d, d, m);
        tcg_gen_or_vec(vece, d, d, t);

        tcg_temp_free_vec(t);
        tcg_temp_free_vec(m);
    }
}

const GVecGen2i sli_op[4] = {
    { .fni8 = gen_shl8_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_8 },
    { .fni8 = gen_shl16_ins_i64,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_16 },
    { .fni4 = gen_shl32_ins_i32,
      .fniv = gen_shl_ins_vec,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_32 },
    { .fni8 = gen_shl64_ins_i64,
      .fniv = gen_shl_ins_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .opc = INDEX_op_shli_vec,
      .vece = MO_64 },
};

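/* Integer multiply-accumulate expanders (VMLA/VMLS): the product is
 * formed in place in 'a', which these callbacks are free to clobber,
 * and then added to or subtracted from the destination d.
 */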
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_add_u8(d, d, a);
}

static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u8(a, a, b);
    gen_helper_neon_sub_u8(d, d, a);
}

static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_add_u16(d, d, a);
}

static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    gen_helper_neon_mul_u16(a, a, b);
    gen_helper_neon_sub_u16(d, d, a);
}

static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_add_i32(d, d, a);
}

static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_mul_i32(a, a, b);
    tcg_gen_sub_i32(d, d, a);
}

static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_add_i64(d, d, a);
}

static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_mul_i64(a, a, b);
    tcg_gen_sub_i64(d, d, a);
}

static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mul_vec(vece, a, a, b);
    tcg_gen_sub_vec(vece, d, d, a);
}

/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
 * these tables are shared with AArch64 which does support them.
 */
const GVecGen3 mla_op[4] = {
    { .fni4 = gen_mla8_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mla16_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mla32_i32,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mla64_i64,
      .fniv = gen_mla_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

const GVecGen3 mls_op[4] = {
    { .fni4 = gen_mls8_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_8 },
    { .fni4 = gen_mls16_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_16 },
    { .fni4 = gen_mls32_i32,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .load_dest = true,
      .vece = MO_32 },
    { .fni8 = gen_mls64_i64,
      .fniv = gen_mls_vec,
      .opc = INDEX_op_mul_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .load_dest = true,
      .vece = MO_64 },
};

/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

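/* setcond produces 0 or 1, so negating it turns the comparison result
 * into the all-zeros/all-ones mask that the NEON comparison ops define;
 * the vector form below gets the mask directly from tcg_gen_cmp_vec.
 */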
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

const GVecGen3 cmtst_op[4] = {
    { .fni4 = gen_helper_neon_tst_u8,
      .fniv = gen_cmtst_vec,
      .vece = MO_8 },
    { .fni4 = gen_helper_neon_tst_u16,
      .fniv = gen_cmtst_vec,
      .vece = MO_16 },
    { .fni4 = gen_cmtst_i32,
      .fniv = gen_cmtst_vec,
      .vece = MO_32 },
    { .fni8 = gen_cmtst_i64,
      .fniv = gen_cmtst_vec,
      .prefer_i64 = TCG_TARGET_REG_BITS == 64,
      .vece = MO_64 },
};

/* Translate a NEON data processing instruction. Return nonzero if the
   instruction is invalid.
   We process data in a mixture of 32-bit and 64-bit chunks.
   Mostly we use 32-bit chunks so we can use normal scalar instructions. */

static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
    int op;
    int q;
    int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
    int size;
    int shift;
    int pass;
    int count;
    int pairwise;
    int u;
    int vec_size;
    uint32_t imm;
    TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
    TCGv_ptr ptr1, ptr2, ptr3;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    q = (insn & (1 << 6)) != 0;
    u = (insn >> 24) & 1;
    VFP_DREG_D(rd, insn);
    VFP_DREG_N(rn, insn);
    VFP_DREG_M(rm, insn);
    size = (insn >> 20) & 3;
    vec_size = q ? 16 : 8;
    rd_ofs = neon_reg_offset(rd, 0);
    rn_ofs = neon_reg_offset(rn, 0);
    rm_ofs = neon_reg_offset(rm, 0);

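    /* At this point q selects a 64 vs 128 bit operation (bit 6 of the
     * encoding), u is bit 24, size is bits [21:20] and rd/rn/rm are the
     * D-register numbers, so the *_ofs values address whole vectors for
     * the gvec expansions below.
     */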
    if ((insn & (1 << 23)) == 0) {
        /* Three register same length. */
        op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
        /* Catch invalid op and bad size combinations: UNDEF */
        if ((neon_3r_sizes[op] & (1 << size)) == 0) {
            return 1;
        }
        /* All insns of this form UNDEF for either this condition or the
         * superset of cases "Q==1"; we catch the latter later.
         */
        if (q && ((rd | rn | rm) & 1)) {
            return 1;
        }
        switch (op) {
        case NEON_3R_SHA:
            /* The SHA-1/SHA-256 3-register instructions require special
             * treatment here, as their size field is overloaded as an
             * op type selector, and they all consume their input in a
             * single pass.
             */
            if (!q) {
                return 1;
            }
            if (!u) { /* SHA-1 */
                if (!dc_isar_feature(aa32_sha1, s)) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                tmp4 = tcg_const_i32(size);
                gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
                tcg_temp_free_i32(tmp4);
            } else { /* SHA-256 */
                if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
                    return 1;
                }
                ptr1 = vfp_reg_ptr(true, rd);
                ptr2 = vfp_reg_ptr(true, rn);
                ptr3 = vfp_reg_ptr(true, rm);
                switch (size) {
                case 0:
                    gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
                    break;
                case 1:
                    gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
                    break;
                case 2:
                    gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
                    break;
                }
            }
            tcg_temp_free_ptr(ptr1);
            tcg_temp_free_ptr(ptr2);
            tcg_temp_free_ptr(ptr3);
            return 0;

        case NEON_3R_VPADD_VQRDMLAH:
            if (!u) {
                break;  /* VPADD */
            }
            /* VQRDMLAH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlah_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_VFM_VQRDMLSH:
            if (!u) {
                /* VFM, VFMS */
                if (size == 1) {
                    return 1;
                }
                break;
            }
            /* VQRDMLSH */
            switch (size) {
            case 1:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s16,
                                     q, rd, rn, rm);
            case 2:
                return do_v81_helper(s, gen_helper_gvec_qrdmlsh_s32,
                                     q, rd, rn, rm);
            }
            return 1;

        case NEON_3R_LOGIC: /* Logic ops. */
            switch ((u << 2) | size) {
            case 0: /* VAND */
                tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 1: /* VBIC */
                tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
                                  vec_size, vec_size);
                break;
            case 2:
                if (rn == rm) {
                    /* VMOV */
                    tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size);
                } else {
                    /* VORR */
                    tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                }
                break;
            case 3: /* VORN */
                tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 4: /* VEOR */
                tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                break;
            }
            return 0;

        case NEON_3R_VADD_VSUB:
            if (u) {
                tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else {
                tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            }
            return 0;

        case NEON_3R_VMUL: /* VMUL */
            if (u) {
                /* Polynomial case allows only P8 and is handled below. */
                if (size != 0) {
                    return 1;
                }
            } else {
                tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
                return 0;
            }
            break;

        case NEON_3R_VML: /* VMLA, VMLS */
            tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
                           u ? &mls_op[size] : &mla_op[size]);
            return 0;

        case NEON_3R_VTST_VCEQ:
            if (u) { /* VCEQ */
                tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
                                 vec_size, vec_size);
            } else { /* VTST */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &cmtst_op[size]);
            }
            return 0;

        case NEON_3R_VCGT:
            tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;

        case NEON_3R_VCGE:
            tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
                             rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
            return 0;
        }

        if (size == 3) {
            /* 64-bit element instructions. */
            for (pass = 0; pass < (q ? 2 : 1); pass++) {
                neon_load_reg64(cpu_V0, rn + pass);
                neon_load_reg64(cpu_V1, rm + pass);
                switch (op) {
                case NEON_3R_VQADD:
                    if (u) {
                        gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VQSUB:
                    if (u) {
                        gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    } else {
                        gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
                                                 cpu_V0, cpu_V1);
                    }
                    break;
                case NEON_3R_VSHL:
                    if (u) {
                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQSHL:
                    if (u) {
                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                 cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VRSHL:
                    if (u) {
                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
                    }
                    break;
                case NEON_3R_VQRSHL:
                    if (u) {
                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    } else {
                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
                                                  cpu_V1, cpu_V0);
                    }
                    break;
                default:
                    abort();
                }
                neon_store_reg64(cpu_V0, rd + pass);
            }
            return 0;
        }
        pairwise = 0;
        switch (op) {
        case NEON_3R_VSHL:
        case NEON_3R_VQSHL:
        case NEON_3R_VRSHL:
        case NEON_3R_VQRSHL:
            {
                int rtmp;
                /* Shift instruction operands are reversed. */
                rtmp = rn;
                rn = rm;
                rm = rtmp;
            }
            break;
        case NEON_3R_VPADD_VQRDMLAH:
        case NEON_3R_VPMAX:
        case NEON_3R_VPMIN:
            pairwise = 1;
            break;
        case NEON_3R_FLOAT_ARITH:
            pairwise = (u && size < 2); /* if VPADD (float) */
            break;
        case NEON_3R_FLOAT_MINMAX:
            pairwise = u; /* if VPMIN/VPMAX (float) */
            break;
        case NEON_3R_FLOAT_CMP:
            if (!u && size) {
                /* no encoding for U=0 C=1x */
                return 1;
            }
            break;
        case NEON_3R_FLOAT_ACMP:
            if (!u) {
                return 1;
            }
            break;
        case NEON_3R_FLOAT_MISC:
            /* VMAXNM/VMINNM in ARMv8 */
            if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
                return 1;
            }
            break;
        case NEON_3R_VFM_VQRDMLSH:
            if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                return 1;
            }
            break;
        default:
            break;
        }

        if (pairwise && q) {
            /* All the pairwise insns UNDEF if Q is set */
            return 1;
        }
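
        /* From here on the remaining 8/16/32-bit ops are implemented
         * element by element: q ? 4 : 2 passes of 32 bits each, with
         * pairwise ops loading both halves of one source per pass.
         */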

        for (pass = 0; pass < (q ? 4 : 2); pass++) {

            if (pairwise) {
                /* Pairwise. */
                if (pass < 1) {
                    tmp = neon_load_reg(rn, 0);
                    tmp2 = neon_load_reg(rn, 1);
                } else {
                    tmp = neon_load_reg(rm, 0);
                    tmp2 = neon_load_reg(rm, 1);
                }
            } else {
                /* Elementwise. */
                tmp = neon_load_reg(rn, pass);
                tmp2 = neon_load_reg(rm, pass);
            }
            switch (op) {
            case NEON_3R_VHADD:
                GEN_NEON_INTEGER_OP(hadd);
                break;
            case NEON_3R_VQADD:
                GEN_NEON_INTEGER_OP_ENV(qadd);
                break;
            case NEON_3R_VRHADD:
                GEN_NEON_INTEGER_OP(rhadd);
                break;
            case NEON_3R_VHSUB:
                GEN_NEON_INTEGER_OP(hsub);
                break;
            case NEON_3R_VQSUB:
                GEN_NEON_INTEGER_OP_ENV(qsub);
                break;
            case NEON_3R_VSHL:
                GEN_NEON_INTEGER_OP(shl);
                break;
            case NEON_3R_VQSHL:
                GEN_NEON_INTEGER_OP_ENV(qshl);
                break;
            case NEON_3R_VRSHL:
                GEN_NEON_INTEGER_OP(rshl);
                break;
            case NEON_3R_VQRSHL:
                GEN_NEON_INTEGER_OP_ENV(qrshl);
                break;
            case NEON_3R_VMAX:
                GEN_NEON_INTEGER_OP(max);
                break;
            case NEON_3R_VMIN:
                GEN_NEON_INTEGER_OP(min);
                break;
            case NEON_3R_VABD:
                GEN_NEON_INTEGER_OP(abd);
                break;
            case NEON_3R_VABA:
                GEN_NEON_INTEGER_OP(abd);
                tcg_temp_free_i32(tmp2);
                tmp2 = neon_load_reg(rd, pass);
                gen_neon_add(size, tmp, tmp2);
                break;
            case NEON_3R_VMUL:
                /* VMUL.P8; other cases already eliminated. */
                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
                break;
            case NEON_3R_VPMAX:
                GEN_NEON_INTEGER_OP(pmax);
                break;
            case NEON_3R_VPMIN:
                GEN_NEON_INTEGER_OP(pmin);
                break;
            case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
                if (!u) { /* VQDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                } else { /* VQRDMULH */
                    switch (size) {
                    case 1:
                        gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                        break;
                    case 2:
                        gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                        break;
                    default: abort();
                    }
                }
                break;
            case NEON_3R_VPADD_VQRDMLAH:
                switch (size) {
                case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
                case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
                case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
                default: abort();
                }
                break;
            case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                switch ((u << 2) | size) {
                case 0: /* VADD */
                case 4: /* VPADD */
                    gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    break;
                case 2: /* VSUB */
                    gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
                    break;
                case 6: /* VABD */
                    gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
                    break;
                default:
                    abort();
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MULTIPLY:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
                if (!u) {
                    tcg_temp_free_i32(tmp2);
                    tmp2 = neon_load_reg(rd, pass);
                    if (size == 0) {
                        gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_CMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (!u) {
                    gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
                    }
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_ACMP:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MINMAX:
            {
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                if (size == 0) {
                    gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
                } else {
                    gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
                }
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            case NEON_3R_FLOAT_MISC:
                if (u) {
                    /* VMAXNM/VMINNM */
                    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                    if (size == 0) {
                        gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
                    } else {
                        gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
                    }
                    tcg_temp_free_ptr(fpstatus);
                } else {
                    if (size == 0) {
                        gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
                    } else {
                        gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
                    }
                }
                break;
            case NEON_3R_VFM_VQRDMLSH:
            {
                /* VFMA, VFMS: fused multiply-add */
                TCGv_ptr fpstatus = get_fpstatus_ptr(1);
                TCGv_i32 tmp3 = neon_load_reg(rd, pass);
                if (size) {
                    /* VFMS */
                    gen_helper_vfp_negs(tmp, tmp);
                }
                gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
                tcg_temp_free_i32(tmp3);
                tcg_temp_free_ptr(fpstatus);
                break;
            }
            default:
                abort();
            }
            tcg_temp_free_i32(tmp2);

            /* Save the result. For elementwise operations we can put it
               straight into the destination register. For pairwise operations
               we have to be careful to avoid clobbering the source operands. */
            if (pairwise && rd == rm) {
                neon_store_scratch(pass, tmp);
            } else {
                neon_store_reg(rd, pass, tmp);
            }

        } /* for pass */
        if (pairwise && rd == rm) {
            for (pass = 0; pass < (q ? 4 : 2); pass++) {
                tmp = neon_load_scratch(pass);
                neon_store_reg(rd, pass, tmp);
            }
        }
        /* End of 3 register same size operations. */
    } else if (insn & (1 << 4)) {
        if ((insn & 0x00380080) != 0) {
            /* Two registers and shift. */
            op = (insn >> 8) & 0xf;
            if (insn & (1 << 7)) {
                /* 64-bit shift. */
                if (op > 7) {
                    return 1;
                }
                size = 3;
            } else {
                size = 2;
                while ((insn & (1 << (size + 19))) == 0)
                    size--;
            }
            shift = (insn >> 16) & ((1 << (3 + size)) - 1);
            if (op < 8) {
                /* Shift by immediate:
                   VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
                if (q && ((rd | rm) & 1)) {
                    return 1;
                }
                if (!u && (op == 4 || op == 6)) {
                    return 1;
                }
                /* Right shifts are encoded as N - shift, where N is the
                   element size in bits. */
                if (op <= 4) {
                    shift = shift - (1 << (size + 3));
                }

                switch (op) {
                case 0: /* VSHR */
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid. Unsigned results are all zeros; signed results
                     * are all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
                                          MIN(shift, (8 << size) - 1),
                                          vec_size, vec_size);
                    } else if (shift >= 8 << size) {
                        tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                    } else {
                        tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
                                          vec_size, vec_size);
                    }
                    return 0;

                case 1: /* VSRA */
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shifts larger than the element size are architecturally
                     * valid. Unsigned results are all zeros; signed results
                     * are all sign bits.
                     */
                    if (!u) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        MIN(shift, (8 << size) - 1),
                                        &ssra_op[size]);
                    } else if (shift >= 8 << size) {
                        /* rd += 0 */
                    } else {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &usra_op[size]);
                    }
                    return 0;

                case 4: /* VSRI */
                    if (!u) {
                        return 1;
                    }
                    /* Right shift comes here negative. */
                    shift = -shift;
                    /* Shift out of range leaves destination unchanged. */
                    if (shift < 8 << size) {
                        tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
                                        shift, &sri_op[size]);
                    }
                    return 0;

                case 5: /* VSHL, VSLI */
                    if (u) { /* VSLI */
                        /* Shift out of range leaves destination unchanged. */
                        if (shift < 8 << size) {
                            tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
                                            vec_size, shift, &sli_op[size]);
                        }
                    } else { /* VSHL */
                        /* Shifts larger than the element size are
                         * architecturally valid and result in zero.
                         */
                        if (shift >= 8 << size) {
                            tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
                        } else {
                            tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
                                              vec_size, vec_size);
                        }
                    }
                    return 0;
                }

                if (size == 3) {
                    count = q + 1;
                } else {
                    count = q ? 4 : 2;
                }

                /* To avoid excessive duplication of ops we implement shift
                 * by immediate using the variable shift operations.
                 */
                imm = dup_const(size, shift);
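                /* dup_const replicates the (signed) shift count into each
                 * element of the immediate: e.g. size == 1 with shift == -3
                 * yields 0xfffdfffdfffdfffd, so one variable-shift op shifts
                 * every 16-bit lane right by the same amount.
                 */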

                for (pass = 0; pass < count; pass++) {
                    if (size == 3) {
                        neon_load_reg64(cpu_V0, rm + pass);
                        tcg_gen_movi_i64(cpu_V1, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            if (u)
                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                            else
                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                            break;
                        case 6: /* VQSHLU */
                            gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
                                                      cpu_V0, cpu_V1);
                            break;
                        case 7: /* VQSHL */
                            if (u) {
                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            } else {
                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
                                                         cpu_V0, cpu_V1);
                            }
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        if (op == 3) {
                            /* Accumulate. */
                            neon_load_reg64(cpu_V1, rd + pass);
                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                        }
                        neon_store_reg64(cpu_V0, rd + pass);
                    } else { /* size < 3 */
                        /* Operands in T0 and T1. */
                        tmp = neon_load_reg(rm, pass);
                        tmp2 = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp2, imm);
                        switch (op) {
                        case 2: /* VRSHR */
                        case 3: /* VRSRA */
                            GEN_NEON_INTEGER_OP(rshl);
                            break;
                        case 6: /* VQSHLU */
                            switch (size) {
                            case 0:
                                gen_helper_neon_qshlu_s8(tmp, cpu_env,
                                                         tmp, tmp2);
                                break;
                            case 1:
                                gen_helper_neon_qshlu_s16(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            case 2:
                                gen_helper_neon_qshlu_s32(tmp, cpu_env,
                                                          tmp, tmp2);
                                break;
                            default:
                                abort();
                            }
                            break;
                        case 7: /* VQSHL */
                            GEN_NEON_INTEGER_OP_ENV(qshl);
                            break;
                        default:
                            g_assert_not_reached();
                        }
                        tcg_temp_free_i32(tmp2);

                        if (op == 3) {
                            /* Accumulate. */
                            tmp2 = neon_load_reg(rd, pass);
                            gen_neon_add(size, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                        }
                        neon_store_reg(rd, pass, tmp);
                    }
                } /* for pass */
            } else if (op < 10) {
                /* Shift by immediate and narrow:
                   VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
                int input_unsigned = (op == 8) ? !u : u;
                if (rm & 1) {
                    return 1;
                }
                shift = shift - (1 << (size + 3));
                size++;
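                /* Right shifts were encoded as N - shift, so shift is now
                 * the (negative) amount to shift right by; bumping size
                 * makes it describe the wide source elements rather than
                 * the narrowed result.
                 */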
                if (size == 3) {
                    tmp64 = tcg_const_i64(shift);
                    neon_load_reg64(cpu_V0, rm);
                    neon_load_reg64(cpu_V1, rm + 1);
                    for (pass = 0; pass < 2; pass++) {
                        TCGv_i64 in;
                        if (pass == 0) {
                            in = cpu_V0;
                        } else {
                            in = cpu_V1;
                        }
                        if (q) {
                            if (input_unsigned) {
                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                            }
                        } else {
                            if (input_unsigned) {
                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                            } else {
                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                            }
                        }
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i64(tmp64);
                } else {
                    if (size == 1) {
                        imm = (uint16_t)shift;
                        imm |= imm << 16;
                    } else {
                        /* size == 2 */
                        imm = (uint32_t)shift;
                    }
                    tmp2 = tcg_const_i32(imm);
                    tmp4 = neon_load_reg(rm + 1, 0);
                    tmp5 = neon_load_reg(rm + 1, 1);
                    for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                            tmp = neon_load_reg(rm, 0);
                        } else {
                            tmp = tmp4;
                        }
                        gen_neon_shift_narrow(size, tmp, tmp2, q,
                                              input_unsigned);
                        if (pass == 0) {
                            tmp3 = neon_load_reg(rm, 1);
                        } else {
                            tmp3 = tmp5;
                        }
                        gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                              input_unsigned);
                        tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                        tcg_temp_free_i32(tmp);
                        tcg_temp_free_i32(tmp3);
                        tmp = tcg_temp_new_i32();
                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
                        neon_store_reg(rd, pass, tmp);
                    } /* for pass */
                    tcg_temp_free_i32(tmp2);
                }
            } else if (op == 10) {
                /* VSHLL, VMOVL */
                if (q || (rd & 1)) {
                    return 1;
                }
                tmp = neon_load_reg(rm, 0);
                tmp2 = neon_load_reg(rm, 1);
                for (pass = 0; pass < 2; pass++) {
                    if (pass == 1)
                        tmp = tmp2;

                    gen_neon_widen(cpu_V0, tmp, size, u);

                    if (shift != 0) {
                        /* The shift is less than the width of the source
                           type, so we can just shift the whole register. */
                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input. */
                        if (size < 2 || !u) {
                            uint64_t imm64;
                            if (size == 0) {
                                imm = (0xffu >> (8 - shift));
                                imm |= imm << 16;
                            } else if (size == 1) {
                                imm = 0xffff >> (16 - shift);
                            } else {
                                /* size == 2 */
                                imm = 0xffffffff >> (32 - shift);
                            }
                            if (size < 2) {
                                imm64 = imm | (((uint64_t)imm) << 32);
                            } else {
                                imm64 = imm;
                            }
                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                        }
                    }
                    neon_store_reg64(cpu_V0, rd + pass);
                }
            } else if (op >= 14) {
                /* VCVT fixed-point. */
                if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
                    return 1;
                }
                /* We have already masked out the must-be-1 top bit of imm6,
                 * hence this 32-shift where the ARM ARM has 64-imm6.
                 */
                shift = 32 - shift;
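                /* E.g. an encoded imm6 of 0b111100 (60) means 64 - 60 = 4
                 * fraction bits; with the top bit already stripped, shift
                 * arrives here as 28 and 32 - 28 gives the same 4.
                 */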
                for (pass = 0; pass < (q ? 4 : 2); pass++) {
                    tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
                    if (!(op & 1)) {
                        if (u)
                            gen_vfp_ulto(0, shift, 1);
                        else
                            gen_vfp_slto(0, shift, 1);
                    } else {
                        if (u)
                            gen_vfp_toul(0, shift, 1);
                        else
                            gen_vfp_tosl(0, shift, 1);
                    }
                    tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
                }
            } else {
                return 1;
            }
        } else { /* (insn & 0x00380080) == 0 */
            int invert, reg_ofs, vec_size;

            if (q && (rd & 1)) {
                return 1;
            }

            op = (insn >> 8) & 0xf;
            /* One register and immediate. */
            imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
            invert = (insn & (1 << 5)) != 0;
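            /* imm is the 8-bit "abcdefgh" immediate field gathered from
             * bits 24, 18:16 and 3:0 of the encoding; the switch below
             * expands it per cmode (op) into the full 32-bit pattern.
             */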
            /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
             * We choose to not special-case this and will behave as if a
             * valid constant encoding of 0 had been given.
             */
            switch (op) {
            case 0: case 1:
                /* no-op */
                break;
            case 2: case 3:
                imm <<= 8;
                break;
            case 4: case 5:
                imm <<= 16;
                break;
            case 6: case 7:
                imm <<= 24;
                break;
            case 8: case 9:
                imm |= imm << 16;
                break;
            case 10: case 11:
                imm = (imm << 8) | (imm << 24);
                break;
            case 12:
                imm = (imm << 8) | 0xff;
                break;
            case 13:
                imm = (imm << 16) | 0xffff;
                break;
            case 14:
                imm |= (imm << 8) | (imm << 16) | (imm << 24);
                if (invert) {
                    imm = ~imm;
                }
                break;
            case 15:
                if (invert) {
                    return 1;
                }
                imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
                      | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
                break;
            }
            if (invert) {
                imm = ~imm;
            }

            reg_ofs = neon_reg_offset(rd, 0);
            vec_size = q ? 16 : 8;

            if (op & 1 && op < 12) {
                if (invert) {
                    /* The immediate value has already been inverted,
                     * so BIC becomes AND.
                     */
                    tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
                                      vec_size, vec_size);
                } else {
                    tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
                                     vec_size, vec_size);
                }
            } else {
                /* VMOV, VMVN. */
                if (op == 14 && invert) {
                    TCGv_i64 t64 = tcg_temp_new_i64();

                    for (pass = 0; pass <= q; ++pass) {
                        uint64_t val = 0;
                        int n;

                        for (n = 0; n < 8; n++) {
                            if (imm & (1 << (n + pass * 8))) {
                                val |= 0xffull << (n * 8);
                            }
                        }
                        tcg_gen_movi_i64(t64, val);
                        neon_store_reg64(t64, rd + pass);
                    }
                    tcg_temp_free_i64(t64);
                } else {
                    tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
                }
            }
        }
    } else { /* (insn & 0x00800010) == 0x00800000 */
        if (size != 3) {
            op = (insn >> 8) & 0xf;
            if ((insn & (1 << 6)) == 0) {
                /* Three registers of different lengths. */
                int src1_wide;
                int src2_wide;
                int prewiden;
                /* undefreq: bit 0 : UNDEF if size == 0
                 *           bit 1 : UNDEF if size == 1
                 *           bit 2 : UNDEF if size == 2
                 *           bit 3 : UNDEF if U == 1
                 * Note that [2:0] set implies 'always UNDEF'
                 */
                int undefreq;
                /* prewiden, src1_wide, src2_wide, undefreq */
                static const int neon_3reg_wide[16][4] = {
                    {1, 0, 0, 0}, /* VADDL */
                    {1, 1, 0, 0}, /* VADDW */
                    {1, 0, 0, 0}, /* VSUBL */
                    {1, 1, 0, 0}, /* VSUBW */
                    {0, 1, 1, 0}, /* VADDHN */
                    {0, 0, 0, 0}, /* VABAL */
                    {0, 1, 1, 0}, /* VSUBHN */
                    {0, 0, 0, 0}, /* VABDL */
                    {0, 0, 0, 0}, /* VMLAL */
                    {0, 0, 0, 9}, /* VQDMLAL */
                    {0, 0, 0, 0}, /* VMLSL */
                    {0, 0, 0, 9}, /* VQDMLSL */
                    {0, 0, 0, 0}, /* Integer VMULL */
                    {0, 0, 0, 1}, /* VQDMULL */
                    {0, 0, 0, 0xa}, /* Polynomial VMULL */
                    {0, 0, 0, 7}, /* Reserved: always UNDEF */
                };
7192 prewiden = neon_3reg_wide[op][0];
7193 src1_wide = neon_3reg_wide[op][1];
7194 src2_wide = neon_3reg_wide[op][2];
695272dc 7195 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 7196
526d0096
PM
7197 if ((undefreq & (1 << size)) ||
7198 ((undefreq & 8) && u)) {
695272dc
PM
7199 return 1;
7200 }
7201 if ((src1_wide && (rn & 1)) ||
7202 (src2_wide && (rm & 1)) ||
7203 (!src2_wide && (rd & 1))) {
ad69471c 7204 return 1;
695272dc 7205 }
ad69471c 7206
4e624eda
PM
7207 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
7208 * outside the loop below as it only performs a single pass.
7209 */
7210 if (op == 14 && size == 2) {
7211 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
7212
962fcbf2 7213 if (!dc_isar_feature(aa32_pmull, s)) {
4e624eda
PM
7214 return 1;
7215 }
7216 tcg_rn = tcg_temp_new_i64();
7217 tcg_rm = tcg_temp_new_i64();
7218 tcg_rd = tcg_temp_new_i64();
7219 neon_load_reg64(tcg_rn, rn);
7220 neon_load_reg64(tcg_rm, rm);
7221 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
7222 neon_store_reg64(tcg_rd, rd);
7223 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
7224 neon_store_reg64(tcg_rd, rd + 1);
7225 tcg_temp_free_i64(tcg_rn);
7226 tcg_temp_free_i64(tcg_rm);
7227 tcg_temp_free_i64(tcg_rd);
7228 return 0;
7229 }
7230
9ee6e8bb
PB
7231 /* Avoid overlapping operands. Wide source operands are
7232 always aligned so will never overlap with wide
7233 destinations in problematic ways. */
8f8e3aa4 7234 if (rd == rm && !src2_wide) {
dd8fbd78
FN
7235 tmp = neon_load_reg(rm, 1);
7236 neon_store_scratch(2, tmp);
8f8e3aa4 7237 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
7238 tmp = neon_load_reg(rn, 1);
7239 neon_store_scratch(2, tmp);
9ee6e8bb 7240 }
f764718d 7241 tmp3 = NULL;
9ee6e8bb 7242 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7243 if (src1_wide) {
7244 neon_load_reg64(cpu_V0, rn + pass);
f764718d 7245 tmp = NULL;
9ee6e8bb 7246 } else {
ad69471c 7247 if (pass == 1 && rd == rn) {
dd8fbd78 7248 tmp = neon_load_scratch(2);
9ee6e8bb 7249 } else {
ad69471c
PB
7250 tmp = neon_load_reg(rn, pass);
7251 }
7252 if (prewiden) {
7253 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
7254 }
7255 }
ad69471c
PB
7256 if (src2_wide) {
7257 neon_load_reg64(cpu_V1, rm + pass);
f764718d 7258 tmp2 = NULL;
9ee6e8bb 7259 } else {
ad69471c 7260 if (pass == 1 && rd == rm) {
dd8fbd78 7261 tmp2 = neon_load_scratch(2);
9ee6e8bb 7262 } else {
ad69471c
PB
7263 tmp2 = neon_load_reg(rm, pass);
7264 }
7265 if (prewiden) {
7266 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 7267 }
9ee6e8bb
PB
7268 }
7269 switch (op) {
7270 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 7271 gen_neon_addl(size);
9ee6e8bb 7272 break;
79b0e534 7273 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 7274 gen_neon_subl(size);
9ee6e8bb
PB
7275 break;
7276 case 5: case 7: /* VABAL, VABDL */
7277 switch ((size << 1) | u) {
ad69471c
PB
7278 case 0:
7279 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
7280 break;
7281 case 1:
7282 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
7283 break;
7284 case 2:
7285 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
7286 break;
7287 case 3:
7288 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
7289 break;
7290 case 4:
7291 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
7292 break;
7293 case 5:
7294 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
7295 break;
9ee6e8bb
PB
7296 default: abort();
7297 }
7d1b0095
PM
7298 tcg_temp_free_i32(tmp2);
7299 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7300 break;
7301 case 8: case 9: case 10: case 11: case 12: case 13:
7302 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 7303 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
7304 break;
7305 case 14: /* Polynomial VMULL */
e5ca24cb 7306 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
7307 tcg_temp_free_i32(tmp2);
7308 tcg_temp_free_i32(tmp);
e5ca24cb 7309 break;
695272dc
PM
7310 default: /* 15 is RESERVED: caught earlier */
7311 abort();
9ee6e8bb 7312 }
ebcd88ce
PM
7313 if (op == 13) {
7314 /* VQDMULL */
7315 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
7316 neon_store_reg64(cpu_V0, rd + pass);
7317 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 7318 /* Accumulate. */
ebcd88ce 7319 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7320 switch (op) {
4dc064e6
PM
7321 case 10: /* VMLSL */
7322 gen_neon_negl(cpu_V0, size);
7323 /* Fall through */
7324 case 5: case 8: /* VABAL, VMLAL */
ad69471c 7325 gen_neon_addl(size);
9ee6e8bb
PB
7326 break;
7327 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 7328 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7329 if (op == 11) {
7330 gen_neon_negl(cpu_V0, size);
7331 }
ad69471c
PB
7332 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
7333 break;
9ee6e8bb
PB
7334 default:
7335 abort();
7336 }
ad69471c 7337 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7338 } else if (op == 4 || op == 6) {
7339 /* Narrowing operation. */
7d1b0095 7340 tmp = tcg_temp_new_i32();
79b0e534 7341 if (!u) {
9ee6e8bb 7342 switch (size) {
ad69471c
PB
7343 case 0:
7344 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
7345 break;
7346 case 1:
7347 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
7348 break;
7349 case 2:
7350 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7351 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7352 break;
9ee6e8bb
PB
7353 default: abort();
7354 }
7355 } else {
7356 switch (size) {
ad69471c
PB
7357 case 0:
7358 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
7359 break;
7360 case 1:
7361 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
7362 break;
7363 case 2:
7364 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
7365 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 7366 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 7367 break;
9ee6e8bb
PB
7368 default: abort();
7369 }
7370 }
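/* Note: the rounded forms above narrow to-nearest-up by adding half
* a unit of the discarded low half before taking the high half,
* e.g. (x + (1u << 31)) >> 32 for the size == 2 case.
*/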
ad69471c
PB
7371 if (pass == 0) {
7372 tmp3 = tmp;
7373 } else {
7374 neon_store_reg(rd, 0, tmp3);
7375 neon_store_reg(rd, 1, tmp);
7376 }
9ee6e8bb
PB
7377 } else {
7378 /* Write back the result. */
ad69471c 7379 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7380 }
7381 }
7382 } else {
3e3326df
PM
7383 /* Two registers and a scalar. NB that for ops of this form
7384 * the ARM ARM labels bit 24 as Q, but it is in our variable
7385 * 'u', not 'q'.
7386 */
7387 if (size == 0) {
7388 return 1;
7389 }
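/* Because bit 24 lands in 'u' here, the per-element loops below
* iterate (u ? 4 : 2) 32-bit passes: a Q-sized operation touches
* four words, a D-sized one two.
*/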
9ee6e8bb 7390 switch (op) {
9ee6e8bb 7391 case 1: /* Floating point VMLA scalar */
9ee6e8bb 7392 case 5: /* Floating point VMLS scalar */
9ee6e8bb 7393 case 9: /* Floating point VMUL scalar */
3e3326df
PM
7394 if (size == 1) {
7395 return 1;
7396 }
7397 /* fall through */
7398 case 0: /* Integer VMLA scalar */
7399 case 4: /* Integer VMLS scalar */
7400 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
7401 case 12: /* VQDMULH scalar */
7402 case 13: /* VQRDMULH scalar */
3e3326df
PM
7403 if (u && ((rd | rn) & 1)) {
7404 return 1;
7405 }
dd8fbd78
FN
7406 tmp = neon_get_scalar(size, rm);
7407 neon_store_scratch(0, tmp);
9ee6e8bb 7408 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
7409 tmp = neon_load_scratch(0);
7410 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
7411 if (op == 12) {
7412 if (size == 1) {
02da0b2d 7413 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7414 } else {
02da0b2d 7415 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7416 }
7417 } else if (op == 13) {
7418 if (size == 1) {
02da0b2d 7419 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7420 } else {
02da0b2d 7421 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
7422 }
7423 } else if (op & 1) {
aa47cfdd
PM
7424 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7425 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
7426 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
7427 } else {
7428 switch (size) {
dd8fbd78
FN
7429 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
7430 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
7431 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 7432 default: abort();
9ee6e8bb
PB
7433 }
7434 }
7d1b0095 7435 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7436 if (op < 8) {
7437 /* Accumulate. */
dd8fbd78 7438 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
7439 switch (op) {
7440 case 0:
dd8fbd78 7441 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
7442 break;
7443 case 1:
aa47cfdd
PM
7444 {
7445 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7446 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
7447 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7448 break;
aa47cfdd 7449 }
9ee6e8bb 7450 case 4:
dd8fbd78 7451 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
7452 break;
7453 case 5:
aa47cfdd
PM
7454 {
7455 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7456 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
7457 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7458 break;
aa47cfdd 7459 }
9ee6e8bb
PB
7460 default:
7461 abort();
7462 }
7d1b0095 7463 tcg_temp_free_i32(tmp2);
9ee6e8bb 7464 }
dd8fbd78 7465 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7466 }
7467 break;
9ee6e8bb 7468 case 3: /* VQDMLAL scalar */
9ee6e8bb 7469 case 7: /* VQDMLSL scalar */
9ee6e8bb 7470 case 11: /* VQDMULL scalar */
3e3326df 7471 if (u == 1) {
ad69471c 7472 return 1;
3e3326df
PM
7473 }
7474 /* fall through */
7475 case 2: /* VMLAL scalar */
7476 case 6: /* VMLSL scalar */
7477 case 10: /* VMULL scalar */
7478 if (rd & 1) {
7479 return 1;
7480 }
dd8fbd78 7481 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
7482 /* We need a copy of tmp2 because gen_neon_mull
7483 * frees it during pass 0. */
7d1b0095 7484 tmp4 = tcg_temp_new_i32();
c6067f04 7485 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 7486 tmp3 = neon_load_reg(rn, 1);
ad69471c 7487
9ee6e8bb 7488 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7489 if (pass == 0) {
7490 tmp = neon_load_reg(rn, 0);
9ee6e8bb 7491 } else {
dd8fbd78 7492 tmp = tmp3;
c6067f04 7493 tmp2 = tmp4;
9ee6e8bb 7494 }
ad69471c 7495 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
7496 if (op != 11) {
7497 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 7498 }
9ee6e8bb 7499 switch (op) {
4dc064e6
PM
7500 case 6:
7501 gen_neon_negl(cpu_V0, size);
7502 /* Fall through */
7503 case 2:
ad69471c 7504 gen_neon_addl(size);
9ee6e8bb
PB
7505 break;
7506 case 3: case 7:
ad69471c 7507 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
7508 if (op == 7) {
7509 gen_neon_negl(cpu_V0, size);
7510 }
ad69471c 7511 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
7512 break;
7513 case 10:
7514 /* no-op */
7515 break;
7516 case 11:
ad69471c 7517 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
7518 break;
7519 default:
7520 abort();
7521 }
ad69471c 7522 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 7523 }
61adacc8
RH
7524 break;
7525 case 14: /* VQRDMLAH scalar */
7526 case 15: /* VQRDMLSH scalar */
7527 {
7528 NeonGenThreeOpEnvFn *fn;
dd8fbd78 7529
962fcbf2 7530 if (!dc_isar_feature(aa32_rdm, s)) {
61adacc8
RH
7531 return 1;
7532 }
7533 if (u && ((rd | rn) & 1)) {
7534 return 1;
7535 }
7536 if (op == 14) {
7537 if (size == 1) {
7538 fn = gen_helper_neon_qrdmlah_s16;
7539 } else {
7540 fn = gen_helper_neon_qrdmlah_s32;
7541 }
7542 } else {
7543 if (size == 1) {
7544 fn = gen_helper_neon_qrdmlsh_s16;
7545 } else {
7546 fn = gen_helper_neon_qrdmlsh_s32;
7547 }
7548 }
dd8fbd78 7549
61adacc8
RH
7550 tmp2 = neon_get_scalar(size, rm);
7551 for (pass = 0; pass < (u ? 4 : 2); pass++) {
7552 tmp = neon_load_reg(rn, pass);
7553 tmp3 = neon_load_reg(rd, pass);
7554 fn(tmp, cpu_env, tmp, tmp2, tmp3);
7555 tcg_temp_free_i32(tmp3);
7556 neon_store_reg(rd, pass, tmp);
7557 }
7558 tcg_temp_free_i32(tmp2);
7559 }
9ee6e8bb 7560 break;
61adacc8
RH
7561 default:
7562 g_assert_not_reached();
9ee6e8bb
PB
7563 }
7564 }
7565 } else { /* size == 3 */
7566 if (!u) {
7567 /* Extract. */
9ee6e8bb 7568 imm = (insn >> 8) & 0xf;
ad69471c
PB
7569
7570 if (imm > 7 && !q)
7571 return 1;
7572
52579ea1
PM
7573 if (q && ((rd | rn | rm) & 1)) {
7574 return 1;
7575 }
7576
ad69471c
PB
7577 if (imm == 0) {
7578 neon_load_reg64(cpu_V0, rn);
7579 if (q) {
7580 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 7581 }
ad69471c
PB
7582 } else if (imm == 8) {
7583 neon_load_reg64(cpu_V0, rn + 1);
7584 if (q) {
7585 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7586 }
ad69471c 7587 } else if (q) {
a7812ae4 7588 tmp64 = tcg_temp_new_i64();
ad69471c
PB
7589 if (imm < 8) {
7590 neon_load_reg64(cpu_V0, rn);
a7812ae4 7591 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
7592 } else {
7593 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 7594 neon_load_reg64(tmp64, rm);
ad69471c
PB
7595 }
7596 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 7597 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
7598 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7599 if (imm < 8) {
7600 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 7601 } else {
ad69471c
PB
7602 neon_load_reg64(cpu_V1, rm + 1);
7603 imm -= 8;
9ee6e8bb 7604 }
ad69471c 7605 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
7606 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
7607 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 7608 tcg_temp_free_i64(tmp64);
ad69471c 7609 } else {
a7812ae4 7610 /* Non-quad case: a single 64-bit extract from the Vm:Vn pair. */
ad69471c 7611 neon_load_reg64(cpu_V0, rn);
a7812ae4 7612 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 7613 neon_load_reg64(cpu_V1, rm);
a7812ae4 7614 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
7615 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
7616 }
7617 neon_store_reg64(cpu_V0, rd);
7618 if (q) {
7619 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7620 }
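/* Conceptually the above computes Vd = (Vm:Vn) >> (imm * 8),
* keeping the low 64 bits (or 128 bits for the Q form): a
* byte-granular extract built from 64-bit shifts and ORs.
*/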
7621 } else if ((insn & (1 << 11)) == 0) {
7622 /* Two register misc. */
7623 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7624 size = (insn >> 18) & 3;
600b828c
PM
7625 /* UNDEF for unknown op values and bad op-size combinations */
7626 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7627 return 1;
7628 }
fe8fcf3d
PM
7629 if (neon_2rm_is_v8_op(op) &&
7630 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7631 return 1;
7632 }
fc2a9b37
PM
7633 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7634 q && ((rm | rd) & 1)) {
7635 return 1;
7636 }
9ee6e8bb 7637 switch (op) {
600b828c 7638 case NEON_2RM_VREV64:
9ee6e8bb 7639 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7640 tmp = neon_load_reg(rm, pass * 2);
7641 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7642 switch (size) {
dd8fbd78
FN
7643 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7644 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7645 case 2: /* no-op */ break;
7646 default: abort();
7647 }
dd8fbd78 7648 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7649 if (size == 2) {
dd8fbd78 7650 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7651 } else {
9ee6e8bb 7652 switch (size) {
dd8fbd78
FN
7653 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7654 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7655 default: abort();
7656 }
dd8fbd78 7657 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7658 }
7659 }
7660 break;
600b828c
PM
7661 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7662 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7663 for (pass = 0; pass < q + 1; pass++) {
7664 tmp = neon_load_reg(rm, pass * 2);
7665 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7666 tmp = neon_load_reg(rm, pass * 2 + 1);
7667 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7668 switch (size) {
7669 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7670 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7671 case 2: tcg_gen_add_i64(CPU_V001); break;
7672 default: abort();
7673 }
600b828c 7674 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7675 /* Accumulate. */
ad69471c
PB
7676 neon_load_reg64(cpu_V1, rd + pass);
7677 gen_neon_addl(size);
9ee6e8bb 7678 }
ad69471c 7679 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7680 }
7681 break;
600b828c 7682 case NEON_2RM_VTRN:
9ee6e8bb 7683 if (size == 2) {
a5a14945 7684 int n;
9ee6e8bb 7685 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7686 tmp = neon_load_reg(rm, n);
7687 tmp2 = neon_load_reg(rd, n + 1);
7688 neon_store_reg(rm, n, tmp2);
7689 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7690 }
7691 } else {
7692 goto elementwise;
7693 }
7694 break;
600b828c 7695 case NEON_2RM_VUZP:
02acedf9 7696 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7697 return 1;
9ee6e8bb
PB
7698 }
7699 break;
600b828c 7700 case NEON_2RM_VZIP:
d68a6f3a 7701 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7702 return 1;
9ee6e8bb
PB
7703 }
7704 break;
600b828c
PM
7705 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7706 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7707 if (rm & 1) {
7708 return 1;
7709 }
f764718d 7710 tmp2 = NULL;
9ee6e8bb 7711 for (pass = 0; pass < 2; pass++) {
ad69471c 7712 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7713 tmp = tcg_temp_new_i32();
600b828c
PM
7714 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7715 tmp, cpu_V0);
ad69471c
PB
7716 if (pass == 0) {
7717 tmp2 = tmp;
7718 } else {
7719 neon_store_reg(rd, 0, tmp2);
7720 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7721 }
9ee6e8bb
PB
7722 }
7723 break;
600b828c 7724 case NEON_2RM_VSHLL:
fc2a9b37 7725 if (q || (rd & 1)) {
9ee6e8bb 7726 return 1;
600b828c 7727 }
ad69471c
PB
7728 tmp = neon_load_reg(rm, 0);
7729 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7730 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7731 if (pass == 1)
7732 tmp = tmp2;
7733 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7734 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7735 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7736 }
7737 break;
600b828c 7738 case NEON_2RM_VCVT_F16_F32:
486624fc
AB
7739 {
7740 TCGv_ptr fpst;
7741 TCGv_i32 ahp;
7742
d614a513 7743 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7744 q || (rm & 1)) {
7745 return 1;
7746 }
7d1b0095
PM
7747 tmp = tcg_temp_new_i32();
7748 tmp2 = tcg_temp_new_i32();
486624fc
AB
7749 fpst = get_fpstatus_ptr(true);
7750 ahp = get_ahp_flag();
60011498 7751 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
486624fc 7752 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498 7753 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
486624fc 7754 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7755 tcg_gen_shli_i32(tmp2, tmp2, 16);
7756 tcg_gen_or_i32(tmp2, tmp2, tmp);
7757 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
486624fc 7758 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, fpst, ahp);
60011498
PB
7759 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7760 neon_store_reg(rd, 0, tmp2);
7d1b0095 7761 tmp2 = tcg_temp_new_i32();
486624fc 7762 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, fpst, ahp);
60011498
PB
7763 tcg_gen_shli_i32(tmp2, tmp2, 16);
7764 tcg_gen_or_i32(tmp2, tmp2, tmp);
7765 neon_store_reg(rd, 1, tmp2);
7d1b0095 7766 tcg_temp_free_i32(tmp);
486624fc
AB
7767 tcg_temp_free_i32(ahp);
7768 tcg_temp_free_ptr(fpst);
60011498 7769 break;
486624fc 7770 }
600b828c 7771 case NEON_2RM_VCVT_F32_F16:
486624fc
AB
7772 {
7773 TCGv_ptr fpst;
7774 TCGv_i32 ahp;
d614a513 7775 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7776 q || (rd & 1)) {
7777 return 1;
7778 }
486624fc
AB
7779 fpst = get_fpstatus_ptr(true);
7780 ahp = get_ahp_flag();
7d1b0095 7781 tmp3 = tcg_temp_new_i32();
60011498
PB
7782 tmp = neon_load_reg(rm, 0);
7783 tmp2 = neon_load_reg(rm, 1);
7784 tcg_gen_ext16u_i32(tmp3, tmp);
486624fc 7785 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7786 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7787 tcg_gen_shri_i32(tmp3, tmp, 16);
486624fc 7788 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7789 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7790 tcg_temp_free_i32(tmp);
60011498 7791 tcg_gen_ext16u_i32(tmp3, tmp2);
486624fc 7792 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498
PB
7793 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7794 tcg_gen_shri_i32(tmp3, tmp2, 16);
486624fc 7795 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, fpst, ahp);
60011498 7796 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7797 tcg_temp_free_i32(tmp2);
7798 tcg_temp_free_i32(tmp3);
486624fc
AB
7799 tcg_temp_free_i32(ahp);
7800 tcg_temp_free_ptr(fpst);
60011498 7801 break;
486624fc 7802 }
9d935509 7803 case NEON_2RM_AESE: case NEON_2RM_AESMC:
962fcbf2 7804 if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
9d935509
AB
7805 return 1;
7806 }
1a66ac61
RH
7807 ptr1 = vfp_reg_ptr(true, rd);
7808 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7809
7810 /* Bit 6 is the lowest opcode bit; it distinguishes between
7811 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7812 */
7813 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7814
7815 if (op == NEON_2RM_AESE) {
1a66ac61 7816 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7817 } else {
1a66ac61 7818 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7819 }
1a66ac61
RH
7820 tcg_temp_free_ptr(ptr1);
7821 tcg_temp_free_ptr(ptr2);
9d935509
AB
7822 tcg_temp_free_i32(tmp3);
7823 break;
f1ecb913 7824 case NEON_2RM_SHA1H:
962fcbf2 7825 if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
f1ecb913
AB
7826 return 1;
7827 }
1a66ac61
RH
7828 ptr1 = vfp_reg_ptr(true, rd);
7829 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7830
1a66ac61 7831 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7832
1a66ac61
RH
7833 tcg_temp_free_ptr(ptr1);
7834 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7835 break;
7836 case NEON_2RM_SHA1SU1:
7837 if ((rm | rd) & 1) {
7838 return 1;
7839 }
7840 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7841 if (q) {
962fcbf2 7842 if (!dc_isar_feature(aa32_sha2, s)) {
f1ecb913
AB
7843 return 1;
7844 }
962fcbf2 7845 } else if (!dc_isar_feature(aa32_sha1, s)) {
f1ecb913
AB
7846 return 1;
7847 }
1a66ac61
RH
7848 ptr1 = vfp_reg_ptr(true, rd);
7849 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7850 if (q) {
1a66ac61 7851 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7852 } else {
1a66ac61 7853 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7854 }
1a66ac61
RH
7855 tcg_temp_free_ptr(ptr1);
7856 tcg_temp_free_ptr(ptr2);
f1ecb913 7857 break;
4bf940be
RH
7858
7859 case NEON_2RM_VMVN:
7860 tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
7861 break;
7862 case NEON_2RM_VNEG:
7863 tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
7864 break;
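/* These two cases expand as whole-vector gvec operations over
* vec_size bytes in one go, rather than going through the
* per-pass elementwise loop below.
*/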
7865
9ee6e8bb
PB
7866 default:
7867 elementwise:
7868 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7869 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7870 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7871 neon_reg_offset(rm, pass));
f764718d 7872 tmp = NULL;
9ee6e8bb 7873 } else {
dd8fbd78 7874 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7875 }
7876 switch (op) {
600b828c 7877 case NEON_2RM_VREV32:
9ee6e8bb 7878 switch (size) {
dd8fbd78
FN
7879 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7880 case 1: gen_swap_half(tmp); break;
600b828c 7881 default: abort();
9ee6e8bb
PB
7882 }
7883 break;
600b828c 7884 case NEON_2RM_VREV16:
dd8fbd78 7885 gen_rev16(tmp);
9ee6e8bb 7886 break;
600b828c 7887 case NEON_2RM_VCLS:
9ee6e8bb 7888 switch (size) {
dd8fbd78
FN
7889 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7890 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7891 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7892 default: abort();
9ee6e8bb
PB
7893 }
7894 break;
600b828c 7895 case NEON_2RM_VCLZ:
9ee6e8bb 7896 switch (size) {
dd8fbd78
FN
7897 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7898 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7899 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7900 default: abort();
9ee6e8bb
PB
7901 }
7902 break;
600b828c 7903 case NEON_2RM_VCNT:
dd8fbd78 7904 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7905 break;
600b828c 7906 case NEON_2RM_VQABS:
9ee6e8bb 7907 switch (size) {
02da0b2d
PM
7908 case 0:
7909 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7910 break;
7911 case 1:
7912 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7913 break;
7914 case 2:
7915 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7916 break;
600b828c 7917 default: abort();
9ee6e8bb
PB
7918 }
7919 break;
600b828c 7920 case NEON_2RM_VQNEG:
9ee6e8bb 7921 switch (size) {
02da0b2d
PM
7922 case 0:
7923 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7924 break;
7925 case 1:
7926 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7927 break;
7928 case 2:
7929 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7930 break;
600b828c 7931 default: abort();
9ee6e8bb
PB
7932 }
7933 break;
600b828c 7934 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7935 tmp2 = tcg_const_i32(0);
9ee6e8bb 7936 switch(size) {
dd8fbd78
FN
7937 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7938 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7939 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7940 default: abort();
9ee6e8bb 7941 }
39d5492a 7942 tcg_temp_free_i32(tmp2);
600b828c 7943 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7944 tcg_gen_not_i32(tmp, tmp);
600b828c 7945 }
9ee6e8bb 7946 break;
600b828c 7947 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7948 tmp2 = tcg_const_i32(0);
9ee6e8bb 7949 switch(size) {
dd8fbd78
FN
7950 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7951 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7952 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7953 default: abort();
9ee6e8bb 7954 }
39d5492a 7955 tcg_temp_free_i32(tmp2);
600b828c 7956 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7957 tcg_gen_not_i32(tmp, tmp);
600b828c 7958 }
9ee6e8bb 7959 break;
600b828c 7960 case NEON_2RM_VCEQ0:
dd8fbd78 7961 tmp2 = tcg_const_i32(0);
9ee6e8bb 7962 switch(size) {
dd8fbd78
FN
7963 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7964 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7965 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7966 default: abort();
9ee6e8bb 7967 }
39d5492a 7968 tcg_temp_free_i32(tmp2);
9ee6e8bb 7969 break;
600b828c 7970 case NEON_2RM_VABS:
9ee6e8bb 7971 switch(size) {
dd8fbd78
FN
7972 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7973 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7974 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7975 default: abort();
9ee6e8bb
PB
7976 }
7977 break;
600b828c 7978 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7979 {
7980 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7981 tmp2 = tcg_const_i32(0);
aa47cfdd 7982 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7983 tcg_temp_free_i32(tmp2);
aa47cfdd 7984 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7985 break;
aa47cfdd 7986 }
600b828c 7987 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7988 {
7989 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7990 tmp2 = tcg_const_i32(0);
aa47cfdd 7991 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7992 tcg_temp_free_i32(tmp2);
aa47cfdd 7993 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7994 break;
aa47cfdd 7995 }
600b828c 7996 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7997 {
7998 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7999 tmp2 = tcg_const_i32(0);
aa47cfdd 8000 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 8001 tcg_temp_free_i32(tmp2);
aa47cfdd 8002 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8003 break;
aa47cfdd 8004 }
600b828c 8005 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
8006 {
8007 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8008 tmp2 = tcg_const_i32(0);
aa47cfdd 8009 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8010 tcg_temp_free_i32(tmp2);
aa47cfdd 8011 tcg_temp_free_ptr(fpstatus);
0e326109 8012 break;
aa47cfdd 8013 }
600b828c 8014 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
8015 {
8016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 8017 tmp2 = tcg_const_i32(0);
aa47cfdd 8018 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 8019 tcg_temp_free_i32(tmp2);
aa47cfdd 8020 tcg_temp_free_ptr(fpstatus);
0e326109 8021 break;
aa47cfdd 8022 }
600b828c 8023 case NEON_2RM_VABS_F:
4373f3ce 8024 gen_vfp_abs(0);
9ee6e8bb 8025 break;
600b828c 8026 case NEON_2RM_VNEG_F:
4373f3ce 8027 gen_vfp_neg(0);
9ee6e8bb 8028 break;
600b828c 8029 case NEON_2RM_VSWP:
dd8fbd78
FN
8030 tmp2 = neon_load_reg(rd, pass);
8031 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8032 break;
600b828c 8033 case NEON_2RM_VTRN:
dd8fbd78 8034 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 8035 switch (size) {
dd8fbd78
FN
8036 case 0: gen_neon_trn_u8(tmp, tmp2); break;
8037 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 8038 default: abort();
9ee6e8bb 8039 }
dd8fbd78 8040 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 8041 break;
34f7b0a2
WN
8042 case NEON_2RM_VRINTN:
8043 case NEON_2RM_VRINTA:
8044 case NEON_2RM_VRINTM:
8045 case NEON_2RM_VRINTP:
8046 case NEON_2RM_VRINTZ:
8047 {
8048 TCGv_i32 tcg_rmode;
8049 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8050 int rmode;
8051
8052 if (op == NEON_2RM_VRINTZ) {
8053 rmode = FPROUNDING_ZERO;
8054 } else {
8055 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
8056 }
8057
8058 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8059 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8060 cpu_env);
8061 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
8062 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8063 cpu_env);
8064 tcg_temp_free_ptr(fpstatus);
8065 tcg_temp_free_i32(tcg_rmode);
8066 break;
8067 }
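/* Note the save/restore idiom above: gen_helper_set_neon_rmode()
* installs the new rounding mode and hands back the previous one in
* the same temporary, so the second call restores the guest's mode.
*/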
2ce70625
WN
8068 case NEON_2RM_VRINTX:
8069 {
8070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8071 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
8072 tcg_temp_free_ptr(fpstatus);
8073 break;
8074 }
901ad525
WN
8075 case NEON_2RM_VCVTAU:
8076 case NEON_2RM_VCVTAS:
8077 case NEON_2RM_VCVTNU:
8078 case NEON_2RM_VCVTNS:
8079 case NEON_2RM_VCVTPU:
8080 case NEON_2RM_VCVTPS:
8081 case NEON_2RM_VCVTMU:
8082 case NEON_2RM_VCVTMS:
8083 {
8084 bool is_signed = !extract32(insn, 7, 1);
8085 TCGv_ptr fpst = get_fpstatus_ptr(1);
8086 TCGv_i32 tcg_rmode, tcg_shift;
8087 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
8088
8089 tcg_shift = tcg_const_i32(0);
8090 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
8091 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8092 cpu_env);
8093
8094 if (is_signed) {
8095 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
8096 tcg_shift, fpst);
8097 } else {
8098 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
8099 tcg_shift, fpst);
8100 }
8101
8102 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
8103 cpu_env);
8104 tcg_temp_free_i32(tcg_rmode);
8105 tcg_temp_free_i32(tcg_shift);
8106 tcg_temp_free_ptr(fpst);
8107 break;
8108 }
600b828c 8109 case NEON_2RM_VRECPE:
b6d4443a
AB
8110 {
8111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8112 gen_helper_recpe_u32(tmp, tmp, fpstatus);
8113 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8114 break;
b6d4443a 8115 }
600b828c 8116 case NEON_2RM_VRSQRTE:
c2fb418e
AB
8117 {
8118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8119 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
8120 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8121 break;
c2fb418e 8122 }
600b828c 8123 case NEON_2RM_VRECPE_F:
b6d4443a
AB
8124 {
8125 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8126 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
8127 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8128 break;
b6d4443a 8129 }
600b828c 8130 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
8131 {
8132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
8133 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
8134 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 8135 break;
c2fb418e 8136 }
600b828c 8137 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 8138 gen_vfp_sito(0, 1);
9ee6e8bb 8139 break;
600b828c 8140 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 8141 gen_vfp_uito(0, 1);
9ee6e8bb 8142 break;
600b828c 8143 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 8144 gen_vfp_tosiz(0, 1);
9ee6e8bb 8145 break;
600b828c 8146 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 8147 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
8148 break;
8149 default:
600b828c
PM
8150 /* Reserved op values were caught by the
8151 * neon_2rm_sizes[] check earlier.
8152 */
8153 abort();
9ee6e8bb 8154 }
600b828c 8155 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
8156 tcg_gen_st_f32(cpu_F0s, cpu_env,
8157 neon_reg_offset(rd, pass));
9ee6e8bb 8158 } else {
dd8fbd78 8159 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
8160 }
8161 }
8162 break;
8163 }
8164 } else if ((insn & (1 << 10)) == 0) {
8165 /* VTBL, VTBX. */
56907d77
PM
8166 int n = ((insn >> 8) & 3) + 1;
8167 if ((rn + n) > 32) {
8168 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
8169 * helper function running off the end of the register file.
8170 */
8171 return 1;
8172 }
8173 n <<= 3;
9ee6e8bb 8174 if (insn & (1 << 6)) {
8f8e3aa4 8175 tmp = neon_load_reg(rd, 0);
9ee6e8bb 8176 } else {
7d1b0095 8177 tmp = tcg_temp_new_i32();
8f8e3aa4 8178 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8179 }
8f8e3aa4 8180 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 8181 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 8182 tmp5 = tcg_const_i32(n);
e7c06c4e 8183 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 8184 tcg_temp_free_i32(tmp);
9ee6e8bb 8185 if (insn & (1 << 6)) {
8f8e3aa4 8186 tmp = neon_load_reg(rd, 1);
9ee6e8bb 8187 } else {
7d1b0095 8188 tmp = tcg_temp_new_i32();
8f8e3aa4 8189 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8190 }
8f8e3aa4 8191 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 8192 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 8193 tcg_temp_free_i32(tmp5);
e7c06c4e 8194 tcg_temp_free_ptr(ptr1);
8f8e3aa4 8195 neon_store_reg(rd, 0, tmp2);
3018f259 8196 neon_store_reg(rd, 1, tmp3);
7d1b0095 8197 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8198 } else if ((insn & 0x380) == 0) {
8199 /* VDUP */
32f91fb7
RH
8200 int element;
8201 TCGMemOp size;
8202
133da6aa
JR
8203 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
8204 return 1;
8205 }
9ee6e8bb 8206 if (insn & (1 << 16)) {
32f91fb7
RH
8207 size = MO_8;
8208 element = (insn >> 17) & 7;
9ee6e8bb 8209 } else if (insn & (1 << 17)) {
32f91fb7
RH
8210 size = MO_16;
8211 element = (insn >> 18) & 3;
8212 } else {
8213 size = MO_32;
8214 element = (insn >> 19) & 1;
9ee6e8bb 8215 }
32f91fb7
RH
8216 tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
8217 neon_element_offset(rm, element, size),
8218 q ? 16 : 8, q ? 16 : 8);
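/* For example, VDUP.8 d0, d1[3] arrives here with size == MO_8 and
* element == 3, and tcg_gen_gvec_dup_mem() replicates that byte
* across all 8 (or, for the Q form, 16) bytes of the destination.
*/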
9ee6e8bb
PB
8219 } else {
8220 return 1;
8221 }
8222 }
8223 }
8224 return 0;
8225}
8226
8b7209fa
RH
8227/* Advanced SIMD three registers of the same length extension.
8228 * 31 25 23 22 20 16 12 11 10 9 8 3 0
8229 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8230 * | 1 1 1 1 1 1 0 | op1 | D | op2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8231 * +---------------+-----+---+-----+----+----+---+----+---+----+---------+----+
8232 */
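/* For example, the VCMLA pattern below fixes bits [11:8] = 0b1000
* and bit 4 = 0 via the mask 0xfe200f10, leaving the rotation in
* bits [24:23] and the size in bit 20 to be extracted as 'data'
* and 'size'.
*/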
8233static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
8234{
26c470a7
RH
8235 gen_helper_gvec_3 *fn_gvec = NULL;
8236 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
8237 int rd, rn, rm, opr_sz;
8238 int data = 0;
8b7209fa
RH
8239 bool q;
8240
8241 q = extract32(insn, 6, 1);
8242 VFP_DREG_D(rd, insn);
8243 VFP_DREG_N(rn, insn);
8244 VFP_DREG_M(rm, insn);
8245 if ((rd | rn | rm) & q) {
8246 return 1;
8247 }
8248
8249 if ((insn & 0xfe200f10) == 0xfc200800) {
8250 /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
26c470a7
RH
8251 int size = extract32(insn, 20, 1);
8252 data = extract32(insn, 23, 2); /* rot */
962fcbf2 8253 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8254 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8255 return 1;
8256 }
8257 fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
8258 } else if ((insn & 0xfea00f10) == 0xfc800800) {
8259 /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
26c470a7
RH
8260 int size = extract32(insn, 20, 1);
8261 data = extract32(insn, 24, 1); /* rot */
962fcbf2 8262 if (!dc_isar_feature(aa32_vcma, s)
5763190f 8263 || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
8b7209fa
RH
8264 return 1;
8265 }
8266 fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
26c470a7
RH
8267 } else if ((insn & 0xfeb00f00) == 0xfc200d00) {
8268 /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
8269 bool u = extract32(insn, 4, 1);
962fcbf2 8270 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8271 return 1;
8272 }
8273 fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
8b7209fa
RH
8274 } else {
8275 return 1;
8276 }
8277
8278 if (s->fp_excp_el) {
8279 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8280 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
8b7209fa
RH
8281 return 0;
8282 }
8283 if (!s->vfp_enabled) {
8284 return 1;
8285 }
8286
8287 opr_sz = (1 + q) * 8;
26c470a7
RH
8288 if (fn_gvec_ptr) {
8289 TCGv_ptr fpst = get_fpstatus_ptr(1);
8290 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8291 vfp_reg_offset(1, rn),
8292 vfp_reg_offset(1, rm), fpst,
8293 opr_sz, opr_sz, data, fn_gvec_ptr);
8294 tcg_temp_free_ptr(fpst);
8295 } else {
8296 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8297 vfp_reg_offset(1, rn),
8298 vfp_reg_offset(1, rm),
8299 opr_sz, opr_sz, data, fn_gvec);
8300 }
8b7209fa
RH
8301 return 0;
8302}
8303
638808ff
RH
8304/* Advanced SIMD two registers and a scalar extension.
8305 * 31 24 23 22 20 16 12 11 10 9 8 3 0
8306 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8307 * | 1 1 1 1 1 1 1 0 | o1 | D | o2 | Vn | Vd | 1 | o3 | 0 | o4 | N Q M U | Vm |
8308 * +-----------------+----+---+----+----+----+---+----+---+----+---------+----+
8309 *
8310 */
8311
8312static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
8313{
26c470a7
RH
8314 gen_helper_gvec_3 *fn_gvec = NULL;
8315 gen_helper_gvec_3_ptr *fn_gvec_ptr = NULL;
2cc99919 8316 int rd, rn, rm, opr_sz, data;
638808ff
RH
8317 bool q;
8318
8319 q = extract32(insn, 6, 1);
8320 VFP_DREG_D(rd, insn);
8321 VFP_DREG_N(rn, insn);
638808ff
RH
8322 if ((rd | rn) & q) {
8323 return 1;
8324 }
8325
8326 if ((insn & 0xff000f10) == 0xfe000800) {
8327 /* VCMLA (indexed) -- 1111 1110 S.RR .... .... 1000 ...0 .... */
2cc99919
RH
8328 int rot = extract32(insn, 20, 2);
8329 int size = extract32(insn, 23, 1);
8330 int index;
8331
962fcbf2 8332 if (!dc_isar_feature(aa32_vcma, s)) {
638808ff
RH
8333 return 1;
8334 }
2cc99919 8335 if (size == 0) {
5763190f 8336 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2cc99919
RH
8337 return 1;
8338 }
8339 /* For fp16, rm is just Vm, and index is M. */
8340 rm = extract32(insn, 0, 4);
8341 index = extract32(insn, 5, 1);
8342 } else {
8343 /* For fp32, rm is the usual M:Vm, and index is 0. */
8344 VFP_DREG_M(rm, insn);
8345 index = 0;
8346 }
8347 data = (index << 2) | rot;
8348 fn_gvec_ptr = (size ? gen_helper_gvec_fcmlas_idx
8349 : gen_helper_gvec_fcmlah_idx);
26c470a7
RH
8350 } else if ((insn & 0xffb00f00) == 0xfe200d00) {
8351 /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
8352 int u = extract32(insn, 4, 1);
962fcbf2 8353 if (!dc_isar_feature(aa32_dp, s)) {
26c470a7
RH
8354 return 1;
8355 }
8356 fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
8357 /* rm is just Vm, and index is M. */
8358 data = extract32(insn, 5, 1); /* index */
8359 rm = extract32(insn, 0, 4);
638808ff
RH
8360 } else {
8361 return 1;
8362 }
8363
8364 if (s->fp_excp_el) {
8365 gen_exception_insn(s, 4, EXCP_UDEF,
4be42f40 8366 syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
638808ff
RH
8367 return 0;
8368 }
8369 if (!s->vfp_enabled) {
8370 return 1;
8371 }
8372
8373 opr_sz = (1 + q) * 8;
26c470a7
RH
8374 if (fn_gvec_ptr) {
8375 TCGv_ptr fpst = get_fpstatus_ptr(1);
8376 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
8377 vfp_reg_offset(1, rn),
8378 vfp_reg_offset(1, rm), fpst,
8379 opr_sz, opr_sz, data, fn_gvec_ptr);
8380 tcg_temp_free_ptr(fpst);
8381 } else {
8382 tcg_gen_gvec_3_ool(vfp_reg_offset(1, rd),
8383 vfp_reg_offset(1, rn),
8384 vfp_reg_offset(1, rm),
8385 opr_sz, opr_sz, data, fn_gvec);
8386 }
638808ff
RH
8387 return 0;
8388}
8389
7dcc1f89 8390static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 8391{
4b6a83fb
PM
8392 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
8393 const ARMCPRegInfo *ri;
9ee6e8bb
PB
8394
8395 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
8396
8397 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 8398 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
8399 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
8400 return 1;
8401 }
d614a513 8402 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 8403 return disas_iwmmxt_insn(s, insn);
d614a513 8404 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 8405 return disas_dsp_insn(s, insn);
c0f4af17
PM
8406 }
8407 return 1;
4b6a83fb
PM
8408 }
8409
8410 /* Otherwise treat as a generic register access */
8411 is64 = (insn & (1 << 25)) == 0;
8412 if (!is64 && ((insn & (1 << 4)) == 0)) {
8413 /* cdp */
8414 return 1;
8415 }
8416
8417 crm = insn & 0xf;
8418 if (is64) {
8419 crn = 0;
8420 opc1 = (insn >> 4) & 0xf;
8421 opc2 = 0;
8422 rt2 = (insn >> 16) & 0xf;
8423 } else {
8424 crn = (insn >> 16) & 0xf;
8425 opc1 = (insn >> 21) & 7;
8426 opc2 = (insn >> 5) & 7;
8427 rt2 = 0;
8428 }
8429 isread = (insn >> 20) & 1;
8430 rt = (insn >> 12) & 0xf;
8431
60322b39 8432 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 8433 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
8434 if (ri) {
8435 /* Check access permissions */
dcbff19b 8436 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
8437 return 1;
8438 }
8439
c0f4af17 8440 if (ri->accessfn ||
d614a513 8441 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
8442 /* Emit code to perform further access permissions checks at
8443 * runtime; this may result in an exception.
c0f4af17
PM
8444 * Note that on XScale all cp0..c13 registers do an access check
8445 * call in order to handle c15_cpar.
f59df3f2
PM
8446 */
8447 TCGv_ptr tmpptr;
3f208fd7 8448 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
8449 uint32_t syndrome;
8450
8451 /* Note that since we are an implementation which takes an
8452 * exception on a trapped conditional instruction only if the
8453 * instruction passes its condition code check, we can take
8454 * advantage of the clause in the ARM ARM that allows us to set
8455 * the COND field in the instruction to 0xE in all cases.
8456 * We could fish the actual condition out of the insn (ARM)
8457 * or the condexec bits (Thumb) but it isn't necessary.
8458 */
8459 switch (cpnum) {
8460 case 14:
8461 if (is64) {
8462 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8463 isread, false);
8bcbf37c
PM
8464 } else {
8465 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8466 rt, isread, false);
8bcbf37c
PM
8467 }
8468 break;
8469 case 15:
8470 if (is64) {
8471 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 8472 isread, false);
8bcbf37c
PM
8473 } else {
8474 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 8475 rt, isread, false);
8bcbf37c
PM
8476 }
8477 break;
8478 default:
8479 /* ARMv8 defines that only coprocessors 14 and 15 exist,
8480 * so this can only happen if this is an ARMv7 or earlier CPU,
8481 * in which case the syndrome information won't actually be
8482 * guest visible.
8483 */
d614a513 8484 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
8485 syndrome = syn_uncategorized();
8486 break;
8487 }
8488
43bfa4a1 8489 gen_set_condexec(s);
3977ee5d 8490 gen_set_pc_im(s, s->pc - 4);
f59df3f2 8491 tmpptr = tcg_const_ptr(ri);
8bcbf37c 8492 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
8493 tcg_isread = tcg_const_i32(isread);
8494 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
8495 tcg_isread);
f59df3f2 8496 tcg_temp_free_ptr(tmpptr);
8bcbf37c 8497 tcg_temp_free_i32(tcg_syn);
3f208fd7 8498 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
8499 }
8500
4b6a83fb
PM
8501 /* Handle special cases first */
8502 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
8503 case ARM_CP_NOP:
8504 return 0;
8505 case ARM_CP_WFI:
8506 if (isread) {
8507 return 1;
8508 }
eaed129d 8509 gen_set_pc_im(s, s->pc);
dcba3a8d 8510 s->base.is_jmp = DISAS_WFI;
2bee5105 8511 return 0;
4b6a83fb
PM
8512 default:
8513 break;
8514 }
8515
c5a49c63 8516 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8517 gen_io_start();
8518 }
8519
4b6a83fb
PM
8520 if (isread) {
8521 /* Read */
8522 if (is64) {
8523 TCGv_i64 tmp64;
8524 TCGv_i32 tmp;
8525 if (ri->type & ARM_CP_CONST) {
8526 tmp64 = tcg_const_i64(ri->resetvalue);
8527 } else if (ri->readfn) {
8528 TCGv_ptr tmpptr;
4b6a83fb
PM
8529 tmp64 = tcg_temp_new_i64();
8530 tmpptr = tcg_const_ptr(ri);
8531 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
8532 tcg_temp_free_ptr(tmpptr);
8533 } else {
8534 tmp64 = tcg_temp_new_i64();
8535 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
8536 }
8537 tmp = tcg_temp_new_i32();
ecc7b3aa 8538 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
8539 store_reg(s, rt, tmp);
8540 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 8541 tmp = tcg_temp_new_i32();
ecc7b3aa 8542 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 8543 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
8544 store_reg(s, rt2, tmp);
8545 } else {
39d5492a 8546 TCGv_i32 tmp;
4b6a83fb
PM
8547 if (ri->type & ARM_CP_CONST) {
8548 tmp = tcg_const_i32(ri->resetvalue);
8549 } else if (ri->readfn) {
8550 TCGv_ptr tmpptr;
4b6a83fb
PM
8551 tmp = tcg_temp_new_i32();
8552 tmpptr = tcg_const_ptr(ri);
8553 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
8554 tcg_temp_free_ptr(tmpptr);
8555 } else {
8556 tmp = load_cpu_offset(ri->fieldoffset);
8557 }
8558 if (rt == 15) {
8559 /* Destination register of r15 for 32 bit loads sets
8560 * the condition codes from the high 4 bits of the value
8561 */
8562 gen_set_nzcv(tmp);
8563 tcg_temp_free_i32(tmp);
8564 } else {
8565 store_reg(s, rt, tmp);
8566 }
8567 }
8568 } else {
8569 /* Write */
8570 if (ri->type & ARM_CP_CONST) {
8571 /* If not forbidden by access permissions, treat as WI */
8572 return 0;
8573 }
8574
8575 if (is64) {
39d5492a 8576 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
8577 TCGv_i64 tmp64 = tcg_temp_new_i64();
8578 tmplo = load_reg(s, rt);
8579 tmphi = load_reg(s, rt2);
8580 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
8581 tcg_temp_free_i32(tmplo);
8582 tcg_temp_free_i32(tmphi);
8583 if (ri->writefn) {
8584 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
8585 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
8586 tcg_temp_free_ptr(tmpptr);
8587 } else {
8588 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
8589 }
8590 tcg_temp_free_i64(tmp64);
8591 } else {
8592 if (ri->writefn) {
39d5492a 8593 TCGv_i32 tmp;
4b6a83fb 8594 TCGv_ptr tmpptr;
4b6a83fb
PM
8595 tmp = load_reg(s, rt);
8596 tmpptr = tcg_const_ptr(ri);
8597 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
8598 tcg_temp_free_ptr(tmpptr);
8599 tcg_temp_free_i32(tmp);
8600 } else {
39d5492a 8601 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
8602 store_cpu_offset(tmp, ri->fieldoffset);
8603 }
8604 }
2452731c
PM
8605 }
8606
c5a49c63 8607 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
8608 /* I/O operations must end the TB here (whether read or write) */
8609 gen_io_end();
8610 gen_lookup_tb(s);
8611 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
8612 /* We default to ending the TB on a coprocessor register write,
8613 * but allow this to be suppressed by the register definition
8614 * (usually only necessary to work around guest bugs).
8615 */
2452731c 8616 gen_lookup_tb(s);
4b6a83fb 8617 }
2452731c 8618
4b6a83fb
PM
8619 return 0;
8620 }
8621
626187d8
PM
8622 /* Unknown register; this might be a guest error or a QEMU
8623 * unimplemented feature.
8624 */
8625 if (is64) {
8626 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8627 "64 bit system register cp:%d opc1: %d crm:%d "
8628 "(%s)\n",
8629 isread ? "read" : "write", cpnum, opc1, crm,
8630 s->ns ? "non-secure" : "secure");
626187d8
PM
8631 } else {
8632 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
8633 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
8634 "(%s)\n",
8635 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
8636 s->ns ? "non-secure" : "secure");
626187d8
PM
8637 }
8638
4a9a539f 8639 return 1;
9ee6e8bb
PB
8640}
8641
5e3f878a
PB
8642
8643/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 8644static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 8645{
39d5492a 8646 TCGv_i32 tmp;
7d1b0095 8647 tmp = tcg_temp_new_i32();
ecc7b3aa 8648 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 8649 store_reg(s, rlow, tmp);
7d1b0095 8650 tmp = tcg_temp_new_i32();
5e3f878a 8651 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 8652 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
8653 store_reg(s, rhigh, tmp);
8654}
8655
8656/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 8657static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 8658{
a7812ae4 8659 TCGv_i64 tmp;
39d5492a 8660 TCGv_i32 tmp2;
5e3f878a 8661
36aa55dc 8662 /* Load value and extend to 64 bits. */
a7812ae4 8663 tmp = tcg_temp_new_i64();
5e3f878a
PB
8664 tmp2 = load_reg(s, rlow);
8665 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 8666 tcg_temp_free_i32(tmp2);
5e3f878a 8667 tcg_gen_add_i64(val, val, tmp);
b75263d6 8668 tcg_temp_free_i64(tmp);
5e3f878a
PB
8669}
8670
8671/* load and add a 64-bit value from a register pair. */
a7812ae4 8672static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 8673{
a7812ae4 8674 TCGv_i64 tmp;
39d5492a
PM
8675 TCGv_i32 tmpl;
8676 TCGv_i32 tmph;
5e3f878a
PB
8677
8678 /* Load 64-bit value rd:rn. */
36aa55dc
PB
8679 tmpl = load_reg(s, rlow);
8680 tmph = load_reg(s, rhigh);
a7812ae4 8681 tmp = tcg_temp_new_i64();
36aa55dc 8682 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
8683 tcg_temp_free_i32(tmpl);
8684 tcg_temp_free_i32(tmph);
5e3f878a 8685 tcg_gen_add_i64(val, val, tmp);
b75263d6 8686 tcg_temp_free_i64(tmp);
5e3f878a
PB
8687}
8688
c9f10124 8689/* Set N and Z flags from hi|lo. */
39d5492a 8690static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 8691{
c9f10124
RH
8692 tcg_gen_mov_i32(cpu_NF, hi);
8693 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
8694}
8695
426f5abc
PB
8696/* Load/Store exclusive instructions are implemented by remembering
8697 the value/address loaded, and seeing if these are the same
354161b3 8698 when the store is performed. This should be sufficient to implement
426f5abc 8699 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
8700 regular stores. The compare vs the remembered value is done during
8701 the cmpxchg operation, but we must compare the addresses manually. */
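/* A typical guest sequence this implements (sketch):
*     do { old = LDREX(addr); new = f(old); } while (STREX(addr, new));
* where the STREX succeeds (writes back and yields 0) only if the
* monitor armed by the LDREX still matches both the address and the
* value originally loaded.
*/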
426f5abc 8702static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 8703 TCGv_i32 addr, int size)
426f5abc 8704{
94ee24e7 8705 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 8706 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 8707
50225ad0
PM
8708 s->is_ldex = true;
8709
426f5abc 8710 if (size == 3) {
39d5492a 8711 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 8712 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 8713
3448d47b
PM
8714 /* For AArch32, architecturally the 32-bit word at the lowest
8715 * address is always Rt and the one at addr+4 is Rt2, even if
8716 * the CPU is big-endian. That means we don't want to do a
8717 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
8718 * for an architecturally 64-bit access, but instead do a
8719 * 64-bit access using MO_BE if appropriate and then split
8720 * the two halves.
8721 * This only makes a difference for BE32 user-mode, where
8722 * frob64() must not flip the two halves of the 64-bit data
8723 * but this code must treat BE32 user-mode like BE32 system.
8724 */
8725 TCGv taddr = gen_aa32_addr(s, addr, opc);
8726
8727 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
8728 tcg_temp_free(taddr);
354161b3 8729 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
8730 if (s->be_data == MO_BE) {
8731 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
8732 } else {
8733 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
8734 }
354161b3
EC
8735 tcg_temp_free_i64(t64);
8736
8737 store_reg(s, rt2, tmp2);
03d05e2d 8738 } else {
354161b3 8739 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 8740 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 8741 }
03d05e2d
PM
8742
8743 store_reg(s, rt, tmp);
8744 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
8745}
8746
8747static void gen_clrex(DisasContext *s)
8748{
03d05e2d 8749 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
8750}
8751
426f5abc 8752static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 8753 TCGv_i32 addr, int size)
426f5abc 8754{
354161b3
EC
8755 TCGv_i32 t0, t1, t2;
8756 TCGv_i64 extaddr;
8757 TCGv taddr;
42a268c2
RH
8758 TCGLabel *done_label;
8759 TCGLabel *fail_label;
354161b3 8760 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc
PB
8761
8762 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
8763 [addr] = {Rt};
8764 {Rd} = 0;
8765 } else {
8766 {Rd} = 1;
8767 } */
8768 fail_label = gen_new_label();
8769 done_label = gen_new_label();
03d05e2d
PM
8770 extaddr = tcg_temp_new_i64();
8771 tcg_gen_extu_i32_i64(extaddr, addr);
8772 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
8773 tcg_temp_free_i64(extaddr);
8774
354161b3
EC
8775 taddr = gen_aa32_addr(s, addr, opc);
8776 t0 = tcg_temp_new_i32();
8777 t1 = load_reg(s, rt);
426f5abc 8778 if (size == 3) {
354161b3
EC
8779 TCGv_i64 o64 = tcg_temp_new_i64();
8780 TCGv_i64 n64 = tcg_temp_new_i64();
03d05e2d 8781
354161b3 8782 t2 = load_reg(s, rt2);
3448d47b
PM
8783 /* For AArch32, architecturally the 32-bit word at the lowest
8784 * address is always Rt and the one at addr+4 is Rt2, even if
8785 * the CPU is big-endian. Since we're going to treat this as a
8786 * single 64-bit BE store, we need to put the two halves in the
8787 * opposite order for BE to LE, so that they end up in the right
8788 * places.
8789 * We don't want gen_aa32_frob64() because that does the wrong
8790 * thing for BE32 usermode.
8791 */
8792 if (s->be_data == MO_BE) {
8793 tcg_gen_concat_i32_i64(n64, t2, t1);
8794 } else {
8795 tcg_gen_concat_i32_i64(n64, t1, t2);
8796 }
354161b3 8797 tcg_temp_free_i32(t2);
03d05e2d 8798
354161b3
EC
8799 tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
8800 get_mem_index(s), opc);
8801 tcg_temp_free_i64(n64);
8802
354161b3
EC
8803 tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
8804 tcg_gen_extrl_i64_i32(t0, o64);
8805
8806 tcg_temp_free_i64(o64);
8807 } else {
8808 t2 = tcg_temp_new_i32();
8809 tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
8810 tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
8811 tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
8812 tcg_temp_free_i32(t2);
426f5abc 8813 }
354161b3
EC
8814 tcg_temp_free_i32(t1);
8815 tcg_temp_free(taddr);
8816 tcg_gen_mov_i32(cpu_R[rd], t0);
8817 tcg_temp_free_i32(t0);
426f5abc 8818 tcg_gen_br(done_label);
354161b3 8819
426f5abc
PB
8820 gen_set_label(fail_label);
8821 tcg_gen_movi_i32(cpu_R[rd], 1);
8822 gen_set_label(done_label);
03d05e2d 8823 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc 8824}
426f5abc 8825
81465888
PM
8826/* gen_srs:
8827 * @env: CPUARMState
8828 * @s: DisasContext
8829 * @mode: mode field from insn (which stack to store to)
8830 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8831 * @writeback: true if writeback bit set
8832 *
8833 * Generate code for the SRS (Store Return State) insn.
8834 */
8835static void gen_srs(DisasContext *s,
8836 uint32_t mode, uint32_t amode, bool writeback)
8837{
8838 int32_t offset;
cbc0326b
PM
8839 TCGv_i32 addr, tmp;
8840 bool undef = false;
8841
8842 /* SRS is:
8843 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8844 * and specified mode is monitor mode
cbc0326b
PM
8845 * - UNDEFINED in Hyp mode
8846 * - UNPREDICTABLE in User or System mode
8847 * - UNPREDICTABLE if the specified mode is:
8848 * -- not implemented
8849 * -- not a valid mode number
8850 * -- a mode that's at a higher exception level
8851 * -- Monitor, if we are Non-secure
f01377f5 8852 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8853 */
ba63cf47 8854 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8855 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8856 return;
8857 }
8858
8859 if (s->current_el == 0 || s->current_el == 2) {
8860 undef = true;
8861 }
8862
8863 switch (mode) {
8864 case ARM_CPU_MODE_USR:
8865 case ARM_CPU_MODE_FIQ:
8866 case ARM_CPU_MODE_IRQ:
8867 case ARM_CPU_MODE_SVC:
8868 case ARM_CPU_MODE_ABT:
8869 case ARM_CPU_MODE_UND:
8870 case ARM_CPU_MODE_SYS:
8871 break;
8872 case ARM_CPU_MODE_HYP:
8873 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8874 undef = true;
8875 }
8876 break;
8877 case ARM_CPU_MODE_MON:
8878 /* No need to check specifically for "are we non-secure" because
8879 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8880 * so if this isn't EL3 then we must be non-secure.
8881 */
8882 if (s->current_el != 3) {
8883 undef = true;
8884 }
8885 break;
8886 default:
8887 undef = true;
8888 }
8889
8890 if (undef) {
8891 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8892 default_exception_el(s));
8893 return;
8894 }
8895
8896 addr = tcg_temp_new_i32();
8897 tmp = tcg_const_i32(mode);
f01377f5
PM
8898 /* get_r13_banked() will raise an exception if called from System mode */
8899 gen_set_condexec(s);
8900 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8901 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8902 tcg_temp_free_i32(tmp);
8903 switch (amode) {
8904 case 0: /* DA */
8905 offset = -4;
8906 break;
8907 case 1: /* IA */
8908 offset = 0;
8909 break;
8910 case 2: /* DB */
8911 offset = -8;
8912 break;
8913 case 3: /* IB */
8914 offset = 4;
8915 break;
8916 default:
8917 abort();
8918 }
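/* The offset selected above positions the base so that the two-word
* frame stored below (LR at addr, SPSR at addr + 4) lands correctly:
* e.g. for DB (amode == 2) the pair occupies SP-8 and SP-4, hence
* the base offset of -8.
*/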
8919 tcg_gen_addi_i32(addr, addr, offset);
8920 tmp = load_reg(s, 14);
12dcc321 8921 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8922 tcg_temp_free_i32(tmp);
81465888
PM
8923 tmp = load_cpu_field(spsr);
8924 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8925 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8926 tcg_temp_free_i32(tmp);
81465888
PM
8927 if (writeback) {
8928 switch (amode) {
8929 case 0:
8930 offset = -8;
8931 break;
8932 case 1:
8933 offset = 4;
8934 break;
8935 case 2:
8936 offset = -4;
8937 break;
8938 case 3:
8939 offset = 0;
8940 break;
8941 default:
8942 abort();
8943 }
8944 tcg_gen_addi_i32(addr, addr, offset);
8945 tmp = tcg_const_i32(mode);
8946 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8947 tcg_temp_free_i32(tmp);
8948 }
8949 tcg_temp_free_i32(addr);
dcba3a8d 8950 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8951}
8952
c2d9644e
RK
8953/* Generate a label used for skipping this instruction */
8954static void arm_gen_condlabel(DisasContext *s)
8955{
8956 if (!s->condjmp) {
8957 s->condlabel = gen_new_label();
8958 s->condjmp = 1;
8959 }
8960}
8961
8962/* Skip this instruction if the ARM condition is false */
8963static void arm_skip_unless(DisasContext *s, uint32_t cond)
8964{
8965 arm_gen_condlabel(s);
8966 arm_gen_test_cc(cond ^ 1, s->condlabel);
8967}
8968
f4df2210 8969static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8970{
f4df2210 8971 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8972 TCGv_i32 tmp;
8973 TCGv_i32 tmp2;
8974 TCGv_i32 tmp3;
8975 TCGv_i32 addr;
a7812ae4 8976 TCGv_i64 tmp64;
9ee6e8bb 8977
e13886e3
PM
8978 /* M variants do not implement ARM mode; this must raise the INVSTATE
8979 * UsageFault exception.
8980 */
b53d8923 8981 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8982 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8983 default_exception_el(s));
8984 return;
b53d8923 8985 }
9ee6e8bb
PB
8986 cond = insn >> 28;
8987 if (cond == 0xf){
be5e7a76
DES
8988 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8989 * choose to UNDEF. In ARMv5 and above the space is used
8990 * for miscellaneous unconditional instructions.
8991 */
8992 ARCH(5);
8993
9ee6e8bb
PB
8994 /* Unconditional instructions. */
8995 if (((insn >> 25) & 7) == 1) {
8996 /* NEON Data processing. */
d614a513 8997 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8998 goto illegal_op;
d614a513 8999 }
9ee6e8bb 9000
7dcc1f89 9001 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9002 goto illegal_op;
7dcc1f89 9003 }
9ee6e8bb
PB
9004 return;
9005 }
9006 if ((insn & 0x0f100000) == 0x04000000) {
9007 /* NEON load/store. */
d614a513 9008 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 9009 goto illegal_op;
d614a513 9010 }
9ee6e8bb 9011
7dcc1f89 9012 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 9013 goto illegal_op;
7dcc1f89 9014 }
9ee6e8bb
PB
9015 return;
9016 }
6a57f3eb
WN
9017 if ((insn & 0x0f000e10) == 0x0e000a00) {
9018 /* VFP. */
7dcc1f89 9019 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9020 goto illegal_op;
9021 }
9022 return;
9023 }
3d185e5d
PM
9024 if (((insn & 0x0f30f000) == 0x0510f000) ||
9025 ((insn & 0x0f30f010) == 0x0710f000)) {
9026 if ((insn & (1 << 22)) == 0) {
9027 /* PLDW; v7MP */
d614a513 9028 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9029 goto illegal_op;
9030 }
9031 }
9032 /* Otherwise PLD; v5TE+ */
be5e7a76 9033 ARCH(5TE);
3d185e5d
PM
9034 return;
9035 }
9036 if (((insn & 0x0f70f000) == 0x0450f000) ||
9037 ((insn & 0x0f70f010) == 0x0650f000)) {
9038 ARCH(7);
9039 return; /* PLI; V7 */
9040 }
9041 if (((insn & 0x0f700000) == 0x04100000) ||
9042 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 9043 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
9044 goto illegal_op;
9045 }
9046 return; /* v7MP: Unallocated memory hint: must NOP */
9047 }
9048
9049 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
9050 ARCH(6);
9051 /* setend */
9886ecdf
PB
9052 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
9053 gen_helper_setend(cpu_env);
dcba3a8d 9054 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
9055 }
9056 return;
9057 } else if ((insn & 0x0fffff00) == 0x057ff000) {
9058 switch ((insn >> 4) & 0xf) {
9059 case 1: /* clrex */
9060 ARCH(6K);
426f5abc 9061 gen_clrex(s);
9ee6e8bb
PB
9062 return;
9063 case 4: /* dsb */
9064 case 5: /* dmb */
9ee6e8bb 9065 ARCH(7);
61e4c432 9066 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 9067 return;
6df99dec
SS
9068 case 6: /* isb */
9069 /* We need to break the TB after this insn to execute
9070 * self-modifying code correctly and also to take
9071 * any pending interrupts immediately.
9072 */
0b609cc1 9073 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 9074 return;
9ee6e8bb
PB
9075 default:
9076 goto illegal_op;
9077 }
9078 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
9079 /* srs */
81465888
PM
9080 ARCH(6);
9081 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 9082 return;
ea825eee 9083 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 9084 /* rfe */
c67b6b71 9085 int32_t offset;
9ee6e8bb
PB
9086 if (IS_USER(s))
9087 goto illegal_op;
9088 ARCH(6);
9089 rn = (insn >> 16) & 0xf;
b0109805 9090 addr = load_reg(s, rn);
9ee6e8bb
PB
9091 i = (insn >> 23) & 3;
9092 switch (i) {
b0109805 9093 case 0: offset = -4; break; /* DA */
c67b6b71
FN
9094 case 1: offset = 0; break; /* IA */
9095 case 2: offset = -8; break; /* DB */
b0109805 9096 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
9097 default: abort();
9098 }
9099 if (offset)
b0109805
PB
9100 tcg_gen_addi_i32(addr, addr, offset);
9101 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 9102 tmp = tcg_temp_new_i32();
12dcc321 9103 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9104 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9105 tmp2 = tcg_temp_new_i32();
12dcc321 9106 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9107 if (insn & (1 << 21)) {
9108 /* Base writeback. */
9109 switch (i) {
b0109805 9110 case 0: offset = -8; break;
c67b6b71
FN
9111 case 1: offset = 4; break;
9112 case 2: offset = -4; break;
b0109805 9113 case 3: offset = 0; break;
9ee6e8bb
PB
9114 default: abort();
9115 }
9116 if (offset)
b0109805
PB
9117 tcg_gen_addi_i32(addr, addr, offset);
9118 store_reg(s, rn, addr);
9119 } else {
7d1b0095 9120 tcg_temp_free_i32(addr);
9ee6e8bb 9121 }
b0109805 9122 gen_rfe(s, tmp, tmp2);
c67b6b71 9123 return;
9ee6e8bb
PB
9124 } else if ((insn & 0x0e000000) == 0x0a000000) {
9125 /* branch link and change to thumb (blx <offset>) */
9126 int32_t offset;
9127
9128 val = (uint32_t)s->pc;
7d1b0095 9129 tmp = tcg_temp_new_i32();
d9ba4830
PB
9130 tcg_gen_movi_i32(tmp, val);
9131 store_reg(s, 14, tmp);
9ee6e8bb
PB
9132 /* Sign-extend the 24-bit offset */
9133 offset = (((int32_t)insn) << 8) >> 8;
9134 /* offset * 4 + bit24 * 2 + (thumb bit) */
9135 val += (offset << 2) | ((insn >> 23) & 2) | 1;
9136 /* pipeline offset */
9137 val += 4;
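 /* Illustrative example: insn 0xFB000000 has imm24 == 0 and
  * H (bit 24) set, so the target is insn_addr + 8 + 2 with the
  * Thumb bit set: a halfword-aligned Thumb destination two bytes
  * past the equivalent ARM target.
  */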
be5e7a76 9138 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 9139 gen_bx_im(s, val);
9ee6e8bb
PB
9140 return;
9141 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 9142 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 9143 /* iWMMXt register transfer. */
c0f4af17 9144 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 9145 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 9146 return;
c0f4af17
PM
9147 }
9148 }
9ee6e8bb 9149 }
8b7209fa
RH
9150 } else if ((insn & 0x0e000a00) == 0x0c000800
9151 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9152 if (disas_neon_insn_3same_ext(s, insn)) {
9153 goto illegal_op;
9154 }
9155 return;
638808ff
RH
9156 } else if ((insn & 0x0f000a00) == 0x0e000800
9157 && arm_dc_feature(s, ARM_FEATURE_V8)) {
9158 if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
9159 goto illegal_op;
9160 }
9161 return;
9ee6e8bb
PB
9162 } else if ((insn & 0x0fe00000) == 0x0c400000) {
9163 /* Coprocessor double register transfer. */
be5e7a76 9164 ARCH(5TE);
9ee6e8bb
PB
9165 } else if ((insn & 0x0f000010) == 0x0e000010) {
9166 /* Additional coprocessor register transfer. */
7997d92f 9167 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
9168 uint32_t mask;
9169 uint32_t val;
9170 /* cps (privileged) */
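 /* Bit 19 (imod high bit) enables masking of A/I/F from bits 8/7/6,
  * with bit 18 choosing set (CPSID) vs clear (CPSIE); bit 17 (M)
  * additionally installs the mode from bits [4:0]. e.g. CPSID if
  * builds mask = CPSR_I | CPSR_F with val = mask, disabling both
  * interrupt types.
  */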
9171 if (IS_USER(s))
9172 return;
9173 mask = val = 0;
9174 if (insn & (1 << 19)) {
9175 if (insn & (1 << 8))
9176 mask |= CPSR_A;
9177 if (insn & (1 << 7))
9178 mask |= CPSR_I;
9179 if (insn & (1 << 6))
9180 mask |= CPSR_F;
9181 if (insn & (1 << 18))
9182 val |= mask;
9183 }
7997d92f 9184 if (insn & (1 << 17)) {
9ee6e8bb
PB
9185 mask |= CPSR_M;
9186 val |= (insn & 0x1f);
9187 }
9188 if (mask) {
2fbac54b 9189 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
9190 }
9191 return;
9192 }
9193 goto illegal_op;
9194 }
9195 if (cond != 0xe) {
9196 /* if not always execute, we generate a conditional jump to
9197 next instruction */
c2d9644e 9198 arm_skip_unless(s, cond);
9ee6e8bb
PB
9199 }
9200 if ((insn & 0x0f900000) == 0x03000000) {
9201 if ((insn & (1 << 21)) == 0) {
9202 ARCH(6T2);
9203 rd = (insn >> 12) & 0xf;
9204 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
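 /* val is the 16-bit immediate assembled from imm4:imm12. MOVW
  * zeroes the upper halfword; MOVT keeps the lower halfword and
  * replaces the upper one, so MOVW r0, #0x1234 followed by
  * MOVT r0, #0x5678 yields r0 = 0x56781234.
  */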
9205 if ((insn & (1 << 22)) == 0) {
9206 /* MOVW */
7d1b0095 9207 tmp = tcg_temp_new_i32();
5e3f878a 9208 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
9209 } else {
9210 /* MOVT */
5e3f878a 9211 tmp = load_reg(s, rd);
86831435 9212 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9213 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 9214 }
5e3f878a 9215 store_reg(s, rd, tmp);
9ee6e8bb
PB
9216 } else {
9217 if (((insn >> 12) & 0xf) != 0xf)
9218 goto illegal_op;
9219 if (((insn >> 16) & 0xf) == 0) {
9220 gen_nop_hint(s, insn & 0xff);
9221 } else {
9222 /* CPSR = immediate */
9223 val = insn & 0xff;
9224 shift = ((insn >> 8) & 0xf) * 2;
9225 if (shift)
9226 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 9227 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
9228 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
9229 i, val)) {
9ee6e8bb 9230 goto illegal_op;
7dcc1f89 9231 }
9ee6e8bb
PB
9232 }
9233 }
9234 } else if ((insn & 0x0f900000) == 0x01000000
9235 && (insn & 0x00000090) != 0x00000090) {
9236 /* miscellaneous instructions */
9237 op1 = (insn >> 21) & 3;
9238 sh = (insn >> 4) & 0xf;
9239 rm = insn & 0xf;
9240 switch (sh) {
8bfd0550
PM
9241 case 0x0: /* MSR, MRS */
9242 if (insn & (1 << 9)) {
9243 /* MSR (banked) and MRS (banked) */
9244 int sysm = extract32(insn, 16, 4) |
9245 (extract32(insn, 8, 1) << 4);
9246 int r = extract32(insn, 22, 1);
9247
9248 if (op1 & 1) {
9249 /* MSR (banked) */
9250 gen_msr_banked(s, r, sysm, rm);
9251 } else {
9252 /* MRS (banked) */
9253 int rd = extract32(insn, 12, 4);
9254
9255 gen_mrs_banked(s, r, sysm, rd);
9256 }
9257 break;
9258 }
9259
9260 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
9261 if (op1 & 1) {
9262 /* PSR = reg */
2fbac54b 9263 tmp = load_reg(s, rm);
9ee6e8bb 9264 i = ((op1 & 2) != 0);
7dcc1f89 9265 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
9266 goto illegal_op;
9267 } else {
9268 /* reg = PSR */
9269 rd = (insn >> 12) & 0xf;
9270 if (op1 & 2) {
9271 if (IS_USER(s))
9272 goto illegal_op;
d9ba4830 9273 tmp = load_cpu_field(spsr);
9ee6e8bb 9274 } else {
7d1b0095 9275 tmp = tcg_temp_new_i32();
9ef39277 9276 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9277 }
d9ba4830 9278 store_reg(s, rd, tmp);
9ee6e8bb
PB
9279 }
9280 break;
9281 case 0x1:
9282 if (op1 == 1) {
9283 /* branch/exchange thumb (bx). */
be5e7a76 9284 ARCH(4T);
d9ba4830
PB
9285 tmp = load_reg(s, rm);
9286 gen_bx(s, tmp);
9ee6e8bb
PB
9287 } else if (op1 == 3) {
9288 /* clz */
be5e7a76 9289 ARCH(5);
9ee6e8bb 9290 rd = (insn >> 12) & 0xf;
1497c961 9291 tmp = load_reg(s, rm);
7539a012 9292 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 9293 store_reg(s, rd, tmp);
9ee6e8bb
PB
9294 } else {
9295 goto illegal_op;
9296 }
9297 break;
9298 case 0x2:
9299 if (op1 == 1) {
9300 ARCH(5J); /* bxj */
9301 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9302 tmp = load_reg(s, rm);
9303 gen_bx(s, tmp);
9ee6e8bb
PB
9304 } else {
9305 goto illegal_op;
9306 }
9307 break;
9308 case 0x3:
9309 if (op1 != 1)
9310 goto illegal_op;
9311
be5e7a76 9312 ARCH(5);
9ee6e8bb 9313 /* branch link/exchange thumb (blx) */
d9ba4830 9314 tmp = load_reg(s, rm);
7d1b0095 9315 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
9316 tcg_gen_movi_i32(tmp2, s->pc);
9317 store_reg(s, 14, tmp2);
9318 gen_bx(s, tmp);
9ee6e8bb 9319 break;
eb0ecd5a
WN
9320 case 0x4:
9321 {
9322 /* crc32/crc32c */
9323 uint32_t c = extract32(insn, 8, 4);
9324
9325 /* Check this CPU supports ARMv8 CRC instructions.
9326 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
9327 * Bits 8, 10 and 11 should be zero.
9328 */
962fcbf2 9329 if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
eb0ecd5a
WN
9330 goto illegal_op;
9331 }
9332
9333 rn = extract32(insn, 16, 4);
9334 rd = extract32(insn, 12, 4);
9335
9336 tmp = load_reg(s, rn);
9337 tmp2 = load_reg(s, rm);
aa633469
PM
9338 if (op1 == 0) {
9339 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9340 } else if (op1 == 1) {
9341 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9342 }
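 /* op1 encodes the operand size as 1 << op1 bytes (0 = byte,
  * 1 = halfword, 2 = word), which is also what the helper receives
  * in tmp3 below; bit 1 of c selects the CRC32C (Castagnoli)
  * polynomial over the IEEE CRC-32 one.
  */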
eb0ecd5a
WN
9343 tmp3 = tcg_const_i32(1 << op1);
9344 if (c & 0x2) {
9345 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9346 } else {
9347 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9348 }
9349 tcg_temp_free_i32(tmp2);
9350 tcg_temp_free_i32(tmp3);
9351 store_reg(s, rd, tmp);
9352 break;
9353 }
9ee6e8bb 9354 case 0x5: /* saturating add/subtract */
be5e7a76 9355 ARCH(5TE);
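 /* op1 bit 1 doubles the second operand with saturation (QDADD/QDSUB
  * vs QADD/QSUB) and bit 0 selects subtract, so e.g. QDADD rd, rm, rn
  * computes sat(rm + sat(2 * rn)), setting the Q flag on saturation.
  */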
9ee6e8bb
PB
9356 rd = (insn >> 12) & 0xf;
9357 rn = (insn >> 16) & 0xf;
b40d0353 9358 tmp = load_reg(s, rm);
5e3f878a 9359 tmp2 = load_reg(s, rn);
9ee6e8bb 9360 if (op1 & 2)
9ef39277 9361 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 9362 if (op1 & 1)
9ef39277 9363 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9364 else
9ef39277 9365 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9366 tcg_temp_free_i32(tmp2);
5e3f878a 9367 store_reg(s, rd, tmp);
9ee6e8bb 9368 break;
55c544ed
PM
9369 case 0x6: /* ERET */
9370 if (op1 != 3) {
9371 goto illegal_op;
9372 }
9373 if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
9374 goto illegal_op;
9375 }
9376 if ((insn & 0x000fff0f) != 0x0000000e) {
9377 /* UNPREDICTABLE; we choose to UNDEF */
9378 goto illegal_op;
9379 }
9380
9381 if (s->current_el == 2) {
9382 tmp = load_cpu_field(elr_el[2]);
9383 } else {
9384 tmp = load_reg(s, 14);
9385 }
9386 gen_exception_return(s, tmp);
9387 break;
49e14940 9388 case 7:
d4a2dc67
PM
9389 {
9390 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 9391 switch (op1) {
19a6e31c
PM
9392 case 0:
9393 /* HLT */
9394 gen_hlt(s, imm16);
9395 break;
37e6456e
PM
9396 case 1:
9397 /* bkpt */
9398 ARCH(5);
c900a2e6 9399 gen_exception_bkpt_insn(s, 4, syn_aa32_bkpt(imm16, false));
37e6456e
PM
9400 break;
9401 case 2:
9402 /* Hypervisor call (v7) */
9403 ARCH(7);
9404 if (IS_USER(s)) {
9405 goto illegal_op;
9406 }
9407 gen_hvc(s, imm16);
9408 break;
9409 case 3:
9410 /* Secure monitor call (v6+) */
9411 ARCH(6K);
9412 if (IS_USER(s)) {
9413 goto illegal_op;
9414 }
9415 gen_smc(s);
9416 break;
9417 default:
19a6e31c 9418 g_assert_not_reached();
49e14940 9419 }
9ee6e8bb 9420 break;
d4a2dc67 9421 }
9ee6e8bb
PB
9422 case 0x8: /* signed multiply */
9423 case 0xa:
9424 case 0xc:
9425 case 0xe:
be5e7a76 9426 ARCH(5TE);
9ee6e8bb
PB
9427 rs = (insn >> 8) & 0xf;
9428 rn = (insn >> 12) & 0xf;
9429 rd = (insn >> 16) & 0xf;
9430 if (op1 == 1) {
9431 /* (32 * 16) >> 16 */
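 /* SMULW<y>/SMLAW<y>: multiply rm by the selected halfword of rs
  * (sh & 4 picks top vs bottom), keeping bits [47:16] of the 48-bit
  * product; SMLAW additionally accumulates rn with Q-flag saturation
  * checking (the (sh & 2) == 0 path below).
  */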
5e3f878a
PB
9432 tmp = load_reg(s, rm);
9433 tmp2 = load_reg(s, rs);
9ee6e8bb 9434 if (sh & 4)
5e3f878a 9435 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9436 else
5e3f878a 9437 gen_sxth(tmp2);
a7812ae4
PB
9438 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9439 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9440 tmp = tcg_temp_new_i32();
ecc7b3aa 9441 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9442 tcg_temp_free_i64(tmp64);
9ee6e8bb 9443 if ((sh & 2) == 0) {
5e3f878a 9444 tmp2 = load_reg(s, rn);
9ef39277 9445 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9446 tcg_temp_free_i32(tmp2);
9ee6e8bb 9447 }
5e3f878a 9448 store_reg(s, rd, tmp);
9ee6e8bb
PB
9449 } else {
9450 /* 16 * 16 */
5e3f878a
PB
9451 tmp = load_reg(s, rm);
9452 tmp2 = load_reg(s, rs);
9453 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 9454 tcg_temp_free_i32(tmp2);
9ee6e8bb 9455 if (op1 == 2) {
a7812ae4
PB
9456 tmp64 = tcg_temp_new_i64();
9457 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9458 tcg_temp_free_i32(tmp);
a7812ae4
PB
9459 gen_addq(s, tmp64, rn, rd);
9460 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 9461 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9462 } else {
9463 if (op1 == 0) {
5e3f878a 9464 tmp2 = load_reg(s, rn);
9ef39277 9465 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9466 tcg_temp_free_i32(tmp2);
9ee6e8bb 9467 }
5e3f878a 9468 store_reg(s, rd, tmp);
9ee6e8bb
PB
9469 }
9470 }
9471 break;
9472 default:
9473 goto illegal_op;
9474 }
9475 } else if (((insn & 0x0e000000) == 0 &&
9476 (insn & 0x00000090) != 0x90) ||
9477 ((insn & 0x0e000000) == (1 << 25))) {
9478 int set_cc, logic_cc, shiftop;
9479
9480 op1 = (insn >> 21) & 0xf;
9481 set_cc = (insn >> 20) & 1;
9482 logic_cc = table_logic_cc[op1] & set_cc;
9483
9484 /* data processing instruction */
9485 if (insn & (1 << 25)) {
9486 /* immediate operand */
9487 val = insn & 0xff;
9488 shift = ((insn >> 8) & 0xf) * 2;
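 /* A1 modified immediate: an 8-bit value rotated right by twice the
  * 4-bit rotate field, e.g. imm12 = 0x4FF gives 0xFF ror 8
  * = 0xFF000000.
  */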
e9bb4aa9 9489 if (shift) {
9ee6e8bb 9490 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 9491 }
7d1b0095 9492 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
9493 tcg_gen_movi_i32(tmp2, val);
9494 if (logic_cc && shift) {
9495 gen_set_CF_bit31(tmp2);
9496 }
9ee6e8bb
PB
9497 } else {
9498 /* register */
9499 rm = (insn) & 0xf;
e9bb4aa9 9500 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9501 shiftop = (insn >> 5) & 3;
9502 if (!(insn & (1 << 4))) {
9503 shift = (insn >> 7) & 0x1f;
e9bb4aa9 9504 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
9505 } else {
9506 rs = (insn >> 8) & 0xf;
8984bd2e 9507 tmp = load_reg(s, rs);
e9bb4aa9 9508 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
9509 }
9510 }
9511 if (op1 != 0x0f && op1 != 0x0d) {
9512 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
9513 tmp = load_reg(s, rn);
9514 } else {
f764718d 9515 tmp = NULL;
9ee6e8bb
PB
9516 }
9517 rd = (insn >> 12) & 0xf;
9518 switch(op1) {
9519 case 0x00:
e9bb4aa9
JR
9520 tcg_gen_and_i32(tmp, tmp, tmp2);
9521 if (logic_cc) {
9522 gen_logic_CC(tmp);
9523 }
7dcc1f89 9524 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9525 break;
9526 case 0x01:
e9bb4aa9
JR
9527 tcg_gen_xor_i32(tmp, tmp, tmp2);
9528 if (logic_cc) {
9529 gen_logic_CC(tmp);
9530 }
7dcc1f89 9531 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9532 break;
9533 case 0x02:
9534 if (set_cc && rd == 15) {
9535 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 9536 if (IS_USER(s)) {
9ee6e8bb 9537 goto illegal_op;
e9bb4aa9 9538 }
72485ec4 9539 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 9540 gen_exception_return(s, tmp);
9ee6e8bb 9541 } else {
e9bb4aa9 9542 if (set_cc) {
72485ec4 9543 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9544 } else {
9545 tcg_gen_sub_i32(tmp, tmp, tmp2);
9546 }
7dcc1f89 9547 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9548 }
9549 break;
9550 case 0x03:
e9bb4aa9 9551 if (set_cc) {
72485ec4 9552 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9553 } else {
9554 tcg_gen_sub_i32(tmp, tmp2, tmp);
9555 }
7dcc1f89 9556 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9557 break;
9558 case 0x04:
e9bb4aa9 9559 if (set_cc) {
72485ec4 9560 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9561 } else {
9562 tcg_gen_add_i32(tmp, tmp, tmp2);
9563 }
7dcc1f89 9564 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9565 break;
9566 case 0x05:
e9bb4aa9 9567 if (set_cc) {
49b4c31e 9568 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9569 } else {
9570 gen_add_carry(tmp, tmp, tmp2);
9571 }
7dcc1f89 9572 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9573 break;
9574 case 0x06:
e9bb4aa9 9575 if (set_cc) {
2de68a49 9576 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
9577 } else {
9578 gen_sub_carry(tmp, tmp, tmp2);
9579 }
7dcc1f89 9580 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9581 break;
9582 case 0x07:
e9bb4aa9 9583 if (set_cc) {
2de68a49 9584 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
9585 } else {
9586 gen_sub_carry(tmp, tmp2, tmp);
9587 }
7dcc1f89 9588 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9589 break;
9590 case 0x08:
9591 if (set_cc) {
e9bb4aa9
JR
9592 tcg_gen_and_i32(tmp, tmp, tmp2);
9593 gen_logic_CC(tmp);
9ee6e8bb 9594 }
7d1b0095 9595 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9596 break;
9597 case 0x09:
9598 if (set_cc) {
e9bb4aa9
JR
9599 tcg_gen_xor_i32(tmp, tmp, tmp2);
9600 gen_logic_CC(tmp);
9ee6e8bb 9601 }
7d1b0095 9602 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9603 break;
9604 case 0x0a:
9605 if (set_cc) {
72485ec4 9606 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 9607 }
7d1b0095 9608 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9609 break;
9610 case 0x0b:
9611 if (set_cc) {
72485ec4 9612 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9613 }
7d1b0095 9614 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9615 break;
9616 case 0x0c:
e9bb4aa9
JR
9617 tcg_gen_or_i32(tmp, tmp, tmp2);
9618 if (logic_cc) {
9619 gen_logic_CC(tmp);
9620 }
7dcc1f89 9621 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9622 break;
9623 case 0x0d:
9624 if (logic_cc && rd == 15) {
9625 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 9626 if (IS_USER(s)) {
9ee6e8bb 9627 goto illegal_op;
e9bb4aa9
JR
9628 }
9629 gen_exception_return(s, tmp2);
9ee6e8bb 9630 } else {
e9bb4aa9
JR
9631 if (logic_cc) {
9632 gen_logic_CC(tmp2);
9633 }
7dcc1f89 9634 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9635 }
9636 break;
9637 case 0x0e:
f669df27 9638 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
9639 if (logic_cc) {
9640 gen_logic_CC(tmp);
9641 }
7dcc1f89 9642 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9643 break;
9644 default:
9645 case 0x0f:
e9bb4aa9
JR
9646 tcg_gen_not_i32(tmp2, tmp2);
9647 if (logic_cc) {
9648 gen_logic_CC(tmp2);
9649 }
7dcc1f89 9650 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
9651 break;
9652 }
e9bb4aa9 9653 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 9654 tcg_temp_free_i32(tmp2);
e9bb4aa9 9655 }
9ee6e8bb
PB
9656 } else {
9657 /* other instructions */
9658 op1 = (insn >> 24) & 0xf;
9659 switch(op1) {
9660 case 0x0:
9661 case 0x1:
9662 /* multiplies, extra load/stores */
9663 sh = (insn >> 5) & 3;
9664 if (sh == 0) {
9665 if (op1 == 0x0) {
9666 rd = (insn >> 16) & 0xf;
9667 rn = (insn >> 12) & 0xf;
9668 rs = (insn >> 8) & 0xf;
9669 rm = (insn) & 0xf;
9670 op1 = (insn >> 20) & 0xf;
9671 switch (op1) {
9672 case 0: case 1: case 2: case 3: case 6:
9673 /* 32 bit mul */
5e3f878a
PB
9674 tmp = load_reg(s, rs);
9675 tmp2 = load_reg(s, rm);
9676 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9677 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9678 if (insn & (1 << 22)) {
9679 /* Subtract (mls) */
9680 ARCH(6T2);
5e3f878a
PB
9681 tmp2 = load_reg(s, rn);
9682 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 9683 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9684 } else if (insn & (1 << 21)) {
9685 /* Add */
5e3f878a
PB
9686 tmp2 = load_reg(s, rn);
9687 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9688 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9689 }
9690 if (insn & (1 << 20))
5e3f878a
PB
9691 gen_logic_CC(tmp);
9692 store_reg(s, rd, tmp);
9ee6e8bb 9693 break;
8aac08b1
AJ
9694 case 4:
9695 /* 64 bit mul double accumulate (UMAAL) */
9696 ARCH(6);
9697 tmp = load_reg(s, rs);
9698 tmp2 = load_reg(s, rm);
9699 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9700 gen_addq_lo(s, tmp64, rn);
9701 gen_addq_lo(s, tmp64, rd);
9702 gen_storeq_reg(s, rn, rd, tmp64);
9703 tcg_temp_free_i64(tmp64);
9704 break;
9705 case 8: case 9: case 10: case 11:
9706 case 12: case 13: case 14: case 15:
9707 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
9708 tmp = load_reg(s, rs);
9709 tmp2 = load_reg(s, rm);
8aac08b1 9710 if (insn & (1 << 22)) {
c9f10124 9711 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 9712 } else {
c9f10124 9713 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
9714 }
9715 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
9716 TCGv_i32 al = load_reg(s, rn);
9717 TCGv_i32 ah = load_reg(s, rd);
c9f10124 9718 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
9719 tcg_temp_free_i32(al);
9720 tcg_temp_free_i32(ah);
9ee6e8bb 9721 }
8aac08b1 9722 if (insn & (1 << 20)) {
c9f10124 9723 gen_logicq_cc(tmp, tmp2);
8aac08b1 9724 }
c9f10124
RH
9725 store_reg(s, rn, tmp);
9726 store_reg(s, rd, tmp2);
9ee6e8bb 9727 break;
8aac08b1
AJ
9728 default:
9729 goto illegal_op;
9ee6e8bb
PB
9730 }
9731 } else {
9732 rn = (insn >> 16) & 0xf;
9733 rd = (insn >> 12) & 0xf;
9734 if (insn & (1 << 23)) {
9735 /* load/store exclusive */
96c55295
PM
9736 bool is_ld = extract32(insn, 20, 1);
9737 bool is_lasr = !extract32(insn, 8, 1);
2359bf80 9738 int op2 = (insn >> 8) & 3;
86753403 9739 op1 = (insn >> 21) & 0x3;
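 /* op2 distinguishes plain load-acquire/store-release (0), the
  * exclusive acquire/release forms (2) and classic ldrex/strex (3);
  * op1 gives the size: 0 = word, 1 = doubleword, 2 = byte,
  * 3 = halfword.
  */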
2359bf80
MR
9740
9741 switch (op2) {
9742 case 0: /* lda/stl */
9743 if (op1 == 1) {
9744 goto illegal_op;
9745 }
9746 ARCH(8);
9747 break;
9748 case 1: /* reserved */
9749 goto illegal_op;
9750 case 2: /* ldaex/stlex */
9751 ARCH(8);
9752 break;
9753 case 3: /* ldrex/strex */
9754 if (op1) {
9755 ARCH(6K);
9756 } else {
9757 ARCH(6);
9758 }
9759 break;
9760 }
9761
3174f8e9 9762 addr = tcg_temp_local_new_i32();
98a46317 9763 load_reg_var(s, addr, rn);
2359bf80 9764
96c55295
PM
9765 if (is_lasr && !is_ld) {
9766 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
9767 }
9768
2359bf80 9769 if (op2 == 0) {
96c55295 9770 if (is_ld) {
2359bf80
MR
9771 tmp = tcg_temp_new_i32();
9772 switch (op1) {
9773 case 0: /* lda */
9bb6558a
PM
9774 gen_aa32_ld32u_iss(s, tmp, addr,
9775 get_mem_index(s),
9776 rd | ISSIsAcqRel);
2359bf80
MR
9777 break;
9778 case 2: /* ldab */
9bb6558a
PM
9779 gen_aa32_ld8u_iss(s, tmp, addr,
9780 get_mem_index(s),
9781 rd | ISSIsAcqRel);
2359bf80
MR
9782 break;
9783 case 3: /* ldah */
9bb6558a
PM
9784 gen_aa32_ld16u_iss(s, tmp, addr,
9785 get_mem_index(s),
9786 rd | ISSIsAcqRel);
2359bf80
MR
9787 break;
9788 default:
9789 abort();
9790 }
9791 store_reg(s, rd, tmp);
9792 } else {
9793 rm = insn & 0xf;
9794 tmp = load_reg(s, rm);
9795 switch (op1) {
9796 case 0: /* stl */
9bb6558a
PM
9797 gen_aa32_st32_iss(s, tmp, addr,
9798 get_mem_index(s),
9799 rm | ISSIsAcqRel);
2359bf80
MR
9800 break;
9801 case 2: /* stlb */
9bb6558a
PM
9802 gen_aa32_st8_iss(s, tmp, addr,
9803 get_mem_index(s),
9804 rm | ISSIsAcqRel);
2359bf80
MR
9805 break;
9806 case 3: /* stlh */
9bb6558a
PM
9807 gen_aa32_st16_iss(s, tmp, addr,
9808 get_mem_index(s),
9809 rm | ISSIsAcqRel);
2359bf80
MR
9810 break;
9811 default:
9812 abort();
9813 }
9814 tcg_temp_free_i32(tmp);
9815 }
96c55295 9816 } else if (is_ld) {
86753403
PB
9817 switch (op1) {
9818 case 0: /* ldrex */
426f5abc 9819 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
9820 break;
9821 case 1: /* ldrexd */
426f5abc 9822 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
9823 break;
9824 case 2: /* ldrexb */
426f5abc 9825 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
9826 break;
9827 case 3: /* ldrexh */
426f5abc 9828 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
9829 break;
9830 default:
9831 abort();
9832 }
9ee6e8bb
PB
9833 } else {
9834 rm = insn & 0xf;
86753403
PB
9835 switch (op1) {
9836 case 0: /* strex */
426f5abc 9837 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9838 break;
9839 case 1: /* strexd */
502e64fe 9840 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9841 break;
9842 case 2: /* strexb */
426f5abc 9843 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9844 break;
9845 case 3: /* strexh */
426f5abc 9846 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9847 break;
9848 default:
9849 abort();
9850 }
9ee6e8bb 9851 }
39d5492a 9852 tcg_temp_free_i32(addr);
96c55295
PM
9853
9854 if (is_lasr && is_ld) {
9855 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
9856 }
c4869ca6
OS
9857 } else if ((insn & 0x00300f00) == 0) {
9858 /* 0bcccc_0001_0x00_xxxx_xxxx_0000_1001_xxxx
9859 * - SWP, SWPB
9860 */
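 /* The deprecated SWP/SWPB is implemented as a single atomic
  * exchange; the word form additionally requires a naturally
  * aligned address (MO_ALIGN). e.g. SWP r0, r1, [r2] stores r1
  * to [r2] and returns the old memory value in r0.
  */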
9861
cf12bce0
EC
9862 TCGv taddr;
9863 TCGMemOp opc = s->be_data;
9864
9ee6e8bb
PB
9865 rm = (insn) & 0xf;
9866
9ee6e8bb 9867 if (insn & (1 << 22)) {
cf12bce0 9868 opc |= MO_UB;
9ee6e8bb 9869 } else {
cf12bce0 9870 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9871 }
cf12bce0
EC
9872
9873 addr = load_reg(s, rn);
9874 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9875 tcg_temp_free_i32(addr);
cf12bce0
EC
9876
9877 tmp = load_reg(s, rm);
9878 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9879 get_mem_index(s), opc);
9880 tcg_temp_free(taddr);
9881 store_reg(s, rd, tmp);
c4869ca6
OS
9882 } else {
9883 goto illegal_op;
9ee6e8bb
PB
9884 }
9885 }
9886 } else {
9887 int address_offset;
3960c336 9888 bool load = insn & (1 << 20);
63f26fcf
PM
9889 bool wbit = insn & (1 << 21);
9890 bool pbit = insn & (1 << 24);
3960c336 9891 bool doubleword = false;
9bb6558a
PM
9892 ISSInfo issinfo;
9893
9ee6e8bb
PB
9894 /* Misc load/store */
9895 rn = (insn >> 16) & 0xf;
9896 rd = (insn >> 12) & 0xf;
3960c336 9897
9bb6558a
PM
9898 /* ISS not valid if writeback */
9899 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9900
3960c336
PM
9901 if (!load && (sh & 2)) {
9902 /* doubleword */
9903 ARCH(5TE);
9904 if (rd & 1) {
9905 /* UNPREDICTABLE; we choose to UNDEF */
9906 goto illegal_op;
9907 }
9908 load = (sh & 1) == 0;
9909 doubleword = true;
9910 }
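 /* For the doubleword forms, bit 5 (sh & 1) distinguishes STRD
  * from LDRD once we know this started as a "store" encoding, and
  * rd must be even since the pair occupies rd and rd + 1.
  */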
9911
b0109805 9912 addr = load_reg(s, rn);
63f26fcf 9913 if (pbit) {
b0109805 9914 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9915 }
9ee6e8bb 9916 address_offset = 0;
3960c336
PM
9917
9918 if (doubleword) {
9919 if (!load) {
9ee6e8bb 9920 /* store */
b0109805 9921 tmp = load_reg(s, rd);
12dcc321 9922 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9923 tcg_temp_free_i32(tmp);
b0109805
PB
9924 tcg_gen_addi_i32(addr, addr, 4);
9925 tmp = load_reg(s, rd + 1);
12dcc321 9926 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9927 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9928 } else {
9929 /* load */
5a839c0d 9930 tmp = tcg_temp_new_i32();
12dcc321 9931 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9932 store_reg(s, rd, tmp);
9933 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9934 tmp = tcg_temp_new_i32();
12dcc321 9935 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9936 rd++;
9ee6e8bb
PB
9937 }
9938 address_offset = -4;
3960c336
PM
9939 } else if (load) {
9940 /* load */
9941 tmp = tcg_temp_new_i32();
9942 switch (sh) {
9943 case 1:
9bb6558a
PM
9944 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9945 issinfo);
3960c336
PM
9946 break;
9947 case 2:
9bb6558a
PM
9948 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9949 issinfo);
3960c336
PM
9950 break;
9951 default:
9952 case 3:
9bb6558a
PM
9953 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9954 issinfo);
3960c336
PM
9955 break;
9956 }
9ee6e8bb
PB
9957 } else {
9958 /* store */
b0109805 9959 tmp = load_reg(s, rd);
9bb6558a 9960 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9961 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9962 }
9963 /* Perform base writeback before the loaded value to
9964 ensure correct behavior with overlapping index registers.
b6af0975 9965 ldrd with base writeback is undefined if the
9ee6e8bb 9966 destination and index registers overlap. */
63f26fcf 9967 if (!pbit) {
b0109805
PB
9968 gen_add_datah_offset(s, insn, address_offset, addr);
9969 store_reg(s, rn, addr);
63f26fcf 9970 } else if (wbit) {
9ee6e8bb 9971 if (address_offset)
b0109805
PB
9972 tcg_gen_addi_i32(addr, addr, address_offset);
9973 store_reg(s, rn, addr);
9974 } else {
7d1b0095 9975 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9976 }
9977 if (load) {
9978 /* Complete the load. */
b0109805 9979 store_reg(s, rd, tmp);
9ee6e8bb
PB
9980 }
9981 }
9982 break;
9983 case 0x4:
9984 case 0x5:
9985 goto do_ldst;
9986 case 0x6:
9987 case 0x7:
9988 if (insn & (1 << 4)) {
9989 ARCH(6);
9990 /* Armv6 Media instructions. */
9991 rm = insn & 0xf;
9992 rn = (insn >> 16) & 0xf;
2c0262af 9993 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9994 rs = (insn >> 8) & 0xf;
9995 switch ((insn >> 23) & 3) {
9996 case 0: /* Parallel add/subtract. */
9997 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9998 tmp = load_reg(s, rn);
9999 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10000 sh = (insn >> 5) & 7;
10001 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
10002 goto illegal_op;
6ddbc6e4 10003 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 10004 tcg_temp_free_i32(tmp2);
6ddbc6e4 10005 store_reg(s, rd, tmp);
9ee6e8bb
PB
10006 break;
10007 case 1:
10008 if ((insn & 0x00700020) == 0) {
6c95676b 10009 /* Halfword pack. */
3670669c
PB
10010 tmp = load_reg(s, rn);
10011 tmp2 = load_reg(s, rm);
9ee6e8bb 10012 shift = (insn >> 7) & 0x1f;
3670669c
PB
10013 if (insn & (1 << 6)) {
10014 /* pkhtb */
22478e79
AZ
10015 if (shift == 0)
10016 shift = 31;
10017 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 10018 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 10019 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
10020 } else {
10021 /* pkhbt */
22478e79
AZ
10022 if (shift)
10023 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 10024 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
10025 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10026 }
10027 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10028 tcg_temp_free_i32(tmp2);
3670669c 10029 store_reg(s, rd, tmp);
9ee6e8bb
PB
10030 } else if ((insn & 0x00200020) == 0x00200000) {
10031 /* [us]sat */
6ddbc6e4 10032 tmp = load_reg(s, rm);
9ee6e8bb
PB
10033 shift = (insn >> 7) & 0x1f;
10034 if (insn & (1 << 6)) {
10035 if (shift == 0)
10036 shift = 31;
6ddbc6e4 10037 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10038 } else {
6ddbc6e4 10039 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
10040 }
10041 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10042 tmp2 = tcg_const_i32(sh);
10043 if (insn & (1 << 22))
9ef39277 10044 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 10045 else
9ef39277 10046 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 10047 tcg_temp_free_i32(tmp2);
6ddbc6e4 10048 store_reg(s, rd, tmp);
9ee6e8bb
PB
10049 } else if ((insn & 0x00300fe0) == 0x00200f20) {
10050 /* [us]sat16 */
6ddbc6e4 10051 tmp = load_reg(s, rm);
9ee6e8bb 10052 sh = (insn >> 16) & 0x1f;
40d3c433
CL
10053 tmp2 = tcg_const_i32(sh);
10054 if (insn & (1 << 22))
9ef39277 10055 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10056 else
9ef39277 10057 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 10058 tcg_temp_free_i32(tmp2);
6ddbc6e4 10059 store_reg(s, rd, tmp);
9ee6e8bb
PB
10060 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
10061 /* Select bytes. */
6ddbc6e4
PB
10062 tmp = load_reg(s, rn);
10063 tmp2 = load_reg(s, rm);
7d1b0095 10064 tmp3 = tcg_temp_new_i32();
0ecb72a5 10065 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 10066 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10067 tcg_temp_free_i32(tmp3);
10068 tcg_temp_free_i32(tmp2);
6ddbc6e4 10069 store_reg(s, rd, tmp);
9ee6e8bb 10070 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 10071 tmp = load_reg(s, rm);
9ee6e8bb 10072 shift = (insn >> 10) & 3;
1301f322 10073 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10074 rotate, a shift is sufficient. */
10075 if (shift != 0)
f669df27 10076 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10077 op1 = (insn >> 20) & 7;
10078 switch (op1) {
5e3f878a
PB
10079 case 0: gen_sxtb16(tmp); break;
10080 case 2: gen_sxtb(tmp); break;
10081 case 3: gen_sxth(tmp); break;
10082 case 4: gen_uxtb16(tmp); break;
10083 case 6: gen_uxtb(tmp); break;
10084 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
10085 default: goto illegal_op;
10086 }
10087 if (rn != 15) {
5e3f878a 10088 tmp2 = load_reg(s, rn);
9ee6e8bb 10089 if ((op1 & 3) == 0) {
5e3f878a 10090 gen_add16(tmp, tmp2);
9ee6e8bb 10091 } else {
5e3f878a 10092 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10093 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10094 }
10095 }
6c95676b 10096 store_reg(s, rd, tmp);
9ee6e8bb
PB
10097 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
10098 /* rev */
b0109805 10099 tmp = load_reg(s, rm);
9ee6e8bb
PB
10100 if (insn & (1 << 22)) {
10101 if (insn & (1 << 7)) {
b0109805 10102 gen_revsh(tmp);
9ee6e8bb
PB
10103 } else {
10104 ARCH(6T2);
b0109805 10105 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10106 }
10107 } else {
10108 if (insn & (1 << 7))
b0109805 10109 gen_rev16(tmp);
9ee6e8bb 10110 else
66896cb8 10111 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 10112 }
b0109805 10113 store_reg(s, rd, tmp);
9ee6e8bb
PB
10114 } else {
10115 goto illegal_op;
10116 }
10117 break;
10118 case 2: /* Multiplies (Type 3). */
41e9564d
PM
10119 switch ((insn >> 20) & 0x7) {
10120 case 5:
10121 if (((insn >> 6) ^ (insn >> 7)) & 1) {
 10122 /* op2 not 00x or 11x: UNDEF */
10123 goto illegal_op;
10124 }
838fa72d
AJ
10125 /* Signed multiply most significant [accumulate].
10126 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
10127 tmp = load_reg(s, rm);
10128 tmp2 = load_reg(s, rs);
a7812ae4 10129 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 10130
955a7dd5 10131 if (rd != 15) {
838fa72d 10132 tmp = load_reg(s, rd);
9ee6e8bb 10133 if (insn & (1 << 6)) {
838fa72d 10134 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 10135 } else {
838fa72d 10136 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
10137 }
10138 }
838fa72d
AJ
10139 if (insn & (1 << 5)) {
10140 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10141 }
10142 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10143 tmp = tcg_temp_new_i32();
ecc7b3aa 10144 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10145 tcg_temp_free_i64(tmp64);
955a7dd5 10146 store_reg(s, rn, tmp);
41e9564d
PM
10147 break;
10148 case 0:
10149 case 4:
10150 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
10151 if (insn & (1 << 7)) {
10152 goto illegal_op;
10153 }
10154 tmp = load_reg(s, rm);
10155 tmp2 = load_reg(s, rs);
9ee6e8bb 10156 if (insn & (1 << 5))
5e3f878a
PB
10157 gen_swap_half(tmp2);
10158 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10159 if (insn & (1 << 22)) {
5e3f878a 10160 /* smlald, smlsld */
33bbd75a
PC
10161 TCGv_i64 tmp64_2;
10162
a7812ae4 10163 tmp64 = tcg_temp_new_i64();
33bbd75a 10164 tmp64_2 = tcg_temp_new_i64();
a7812ae4 10165 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 10166 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 10167 tcg_temp_free_i32(tmp);
33bbd75a
PC
10168 tcg_temp_free_i32(tmp2);
10169 if (insn & (1 << 6)) {
10170 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
10171 } else {
10172 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
10173 }
10174 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
10175 gen_addq(s, tmp64, rd, rn);
10176 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 10177 tcg_temp_free_i64(tmp64);
9ee6e8bb 10178 } else {
5e3f878a 10179 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
10180 if (insn & (1 << 6)) {
10181 /* This subtraction cannot overflow. */
10182 tcg_gen_sub_i32(tmp, tmp, tmp2);
10183 } else {
10184 /* This addition cannot overflow 32 bits;
10185 * however it may overflow considered as a
10186 * signed operation, in which case we must set
10187 * the Q flag.
10188 */
10189 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
10190 }
10191 tcg_temp_free_i32(tmp2);
22478e79 10192 if (rd != 15)
9ee6e8bb 10193 {
22478e79 10194 tmp2 = load_reg(s, rd);
9ef39277 10195 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10196 tcg_temp_free_i32(tmp2);
9ee6e8bb 10197 }
22478e79 10198 store_reg(s, rn, tmp);
9ee6e8bb 10199 }
41e9564d 10200 break;
b8b8ea05
PM
10201 case 1:
10202 case 3:
10203 /* SDIV, UDIV */
7e0cf8b4 10204 if (!dc_isar_feature(arm_div, s)) {
b8b8ea05
PM
10205 goto illegal_op;
10206 }
10207 if (((insn >> 5) & 7) || (rd != 15)) {
10208 goto illegal_op;
10209 }
10210 tmp = load_reg(s, rm);
10211 tmp2 = load_reg(s, rs);
10212 if (insn & (1 << 21)) {
10213 gen_helper_udiv(tmp, tmp, tmp2);
10214 } else {
10215 gen_helper_sdiv(tmp, tmp, tmp2);
10216 }
10217 tcg_temp_free_i32(tmp2);
10218 store_reg(s, rn, tmp);
10219 break;
41e9564d
PM
10220 default:
10221 goto illegal_op;
9ee6e8bb
PB
10222 }
10223 break;
10224 case 3:
10225 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
10226 switch (op1) {
10227 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
10228 ARCH(6);
10229 tmp = load_reg(s, rm);
10230 tmp2 = load_reg(s, rs);
10231 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10232 tcg_temp_free_i32(tmp2);
ded9d295
AZ
10233 if (rd != 15) {
10234 tmp2 = load_reg(s, rd);
6ddbc6e4 10235 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10236 tcg_temp_free_i32(tmp2);
9ee6e8bb 10237 }
ded9d295 10238 store_reg(s, rn, tmp);
9ee6e8bb
PB
10239 break;
10240 case 0x20: case 0x24: case 0x28: case 0x2c:
10241 /* Bitfield insert/clear. */
10242 ARCH(6T2);
10243 shift = (insn >> 7) & 0x1f;
10244 i = (insn >> 16) & 0x1f;
45140a57
KB
10245 if (i < shift) {
10246 /* UNPREDICTABLE; we choose to UNDEF */
10247 goto illegal_op;
10248 }
9ee6e8bb
PB
10249 i = i + 1 - shift;
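 /* shift is the lsb and the field width is msb + 1 - lsb; rm == 15
  * means BFC (insert zeroes), otherwise BFI deposits the low bits
  * of rm, e.g. BFI r0, r1, #8, #4 copies r1[3:0] into r0[11:8].
  */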
10250 if (rm == 15) {
7d1b0095 10251 tmp = tcg_temp_new_i32();
5e3f878a 10252 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 10253 } else {
5e3f878a 10254 tmp = load_reg(s, rm);
9ee6e8bb
PB
10255 }
10256 if (i != 32) {
5e3f878a 10257 tmp2 = load_reg(s, rd);
d593c48e 10258 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 10259 tcg_temp_free_i32(tmp2);
9ee6e8bb 10260 }
5e3f878a 10261 store_reg(s, rd, tmp);
9ee6e8bb
PB
10262 break;
10263 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
10264 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 10265 ARCH(6T2);
5e3f878a 10266 tmp = load_reg(s, rm);
9ee6e8bb
PB
10267 shift = (insn >> 7) & 0x1f;
10268 i = ((insn >> 16) & 0x1f) + 1;
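 /* i is widthminus1 + 1; UBFX zero-extends and SBFX sign-extends
  * the extracted field, e.g. UBFX r0, r1, #4, #8 yields
  * (r1 >> 4) & 0xff.
  */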
10269 if (shift + i > 32)
10270 goto illegal_op;
10271 if (i < 32) {
10272 if (op1 & 0x20) {
59a71b4c 10273 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 10274 } else {
59a71b4c 10275 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
10276 }
10277 }
5e3f878a 10278 store_reg(s, rd, tmp);
9ee6e8bb
PB
10279 break;
10280 default:
10281 goto illegal_op;
10282 }
10283 break;
10284 }
10285 break;
10286 }
10287 do_ldst:
10288 /* Check for undefined extension instructions
 10289 * per the ARM Bible, i.e.:
10290 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
10291 */
10292 sh = (0xf << 20) | (0xf << 4);
10293 if (op1 == 0x7 && ((insn & sh) == sh))
10294 {
10295 goto illegal_op;
10296 }
10297 /* load/store byte/word */
10298 rn = (insn >> 16) & 0xf;
10299 rd = (insn >> 12) & 0xf;
b0109805 10300 tmp2 = load_reg(s, rn);
a99caa48
PM
10301 if ((insn & 0x01200000) == 0x00200000) {
10302 /* ldrt/strt */
579d21cc 10303 i = get_a32_user_mem_index(s);
a99caa48
PM
10304 } else {
10305 i = get_mem_index(s);
10306 }
9ee6e8bb 10307 if (insn & (1 << 24))
b0109805 10308 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
10309 if (insn & (1 << 20)) {
10310 /* load */
5a839c0d 10311 tmp = tcg_temp_new_i32();
9ee6e8bb 10312 if (insn & (1 << 22)) {
9bb6558a 10313 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10314 } else {
9bb6558a 10315 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 10316 }
9ee6e8bb
PB
10317 } else {
10318 /* store */
b0109805 10319 tmp = load_reg(s, rd);
5a839c0d 10320 if (insn & (1 << 22)) {
9bb6558a 10321 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 10322 } else {
9bb6558a 10323 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
10324 }
10325 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10326 }
10327 if (!(insn & (1 << 24))) {
b0109805
PB
10328 gen_add_data_offset(s, insn, tmp2);
10329 store_reg(s, rn, tmp2);
10330 } else if (insn & (1 << 21)) {
10331 store_reg(s, rn, tmp2);
10332 } else {
7d1b0095 10333 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10334 }
10335 if (insn & (1 << 20)) {
10336 /* Complete the load. */
7dcc1f89 10337 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
10338 }
10339 break;
10340 case 0x08:
10341 case 0x09:
10342 {
da3e53dd
PM
10343 int j, n, loaded_base;
10344 bool exc_return = false;
10345 bool is_load = extract32(insn, 20, 1);
10346 bool user = false;
39d5492a 10347 TCGv_i32 loaded_var;
9ee6e8bb
PB
10348 /* load/store multiple words */
10349 /* XXX: store correct base if write back */
9ee6e8bb 10350 if (insn & (1 << 22)) {
da3e53dd 10351 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
10352 if (IS_USER(s))
10353 goto illegal_op; /* only usable in supervisor mode */
10354
da3e53dd
PM
10355 if (is_load && extract32(insn, 15, 1)) {
10356 exc_return = true;
10357 } else {
10358 user = true;
10359 }
9ee6e8bb
PB
10360 }
10361 rn = (insn >> 16) & 0xf;
b0109805 10362 addr = load_reg(s, rn);
9ee6e8bb
PB
10363
10364 /* compute total size */
10365 loaded_base = 0;
f764718d 10366 loaded_var = NULL;
9ee6e8bb
PB
10367 n = 0;
10368 for(i=0;i<16;i++) {
10369 if (insn & (1 << i))
10370 n++;
10371 }
10372 /* XXX: test invalid n == 0 case ? */
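 /* Compute the address of the lowest word transferred: increment
  * modes start at Rn (IA) or Rn + 4 (IB); decrement modes start at
  * Rn - 4 * n (DB) or Rn - 4 * (n - 1) (DA). e.g. LDMDB r0, {r1-r4}
  * reads from r0 - 16 upwards.
  */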
10373 if (insn & (1 << 23)) {
10374 if (insn & (1 << 24)) {
10375 /* pre increment */
b0109805 10376 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10377 } else {
10378 /* post increment */
10379 }
10380 } else {
10381 if (insn & (1 << 24)) {
10382 /* pre decrement */
b0109805 10383 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10384 } else {
10385 /* post decrement */
10386 if (n != 1)
b0109805 10387 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10388 }
10389 }
10390 j = 0;
10391 for(i=0;i<16;i++) {
10392 if (insn & (1 << i)) {
da3e53dd 10393 if (is_load) {
9ee6e8bb 10394 /* load */
5a839c0d 10395 tmp = tcg_temp_new_i32();
12dcc321 10396 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 10397 if (user) {
b75263d6 10398 tmp2 = tcg_const_i32(i);
1ce94f81 10399 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 10400 tcg_temp_free_i32(tmp2);
7d1b0095 10401 tcg_temp_free_i32(tmp);
9ee6e8bb 10402 } else if (i == rn) {
b0109805 10403 loaded_var = tmp;
9ee6e8bb 10404 loaded_base = 1;
fb0e8e79
PM
10405 } else if (rn == 15 && exc_return) {
10406 store_pc_exc_ret(s, tmp);
9ee6e8bb 10407 } else {
7dcc1f89 10408 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
10409 }
10410 } else {
10411 /* store */
10412 if (i == 15) {
10413 /* special case: r15 = PC + 8 */
10414 val = (long)s->pc + 4;
7d1b0095 10415 tmp = tcg_temp_new_i32();
b0109805 10416 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 10417 } else if (user) {
7d1b0095 10418 tmp = tcg_temp_new_i32();
b75263d6 10419 tmp2 = tcg_const_i32(i);
9ef39277 10420 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 10421 tcg_temp_free_i32(tmp2);
9ee6e8bb 10422 } else {
b0109805 10423 tmp = load_reg(s, i);
9ee6e8bb 10424 }
12dcc321 10425 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 10426 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10427 }
10428 j++;
10429 /* no need to add after the last transfer */
10430 if (j != n)
b0109805 10431 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10432 }
10433 }
10434 if (insn & (1 << 21)) {
10435 /* write back */
10436 if (insn & (1 << 23)) {
10437 if (insn & (1 << 24)) {
10438 /* pre increment */
10439 } else {
10440 /* post increment */
b0109805 10441 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
10442 }
10443 } else {
10444 if (insn & (1 << 24)) {
10445 /* pre decrement */
10446 if (n != 1)
b0109805 10447 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
10448 } else {
10449 /* post decrement */
b0109805 10450 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
10451 }
10452 }
b0109805
PB
10453 store_reg(s, rn, addr);
10454 } else {
7d1b0095 10455 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10456 }
10457 if (loaded_base) {
b0109805 10458 store_reg(s, rn, loaded_var);
9ee6e8bb 10459 }
da3e53dd 10460 if (exc_return) {
9ee6e8bb 10461 /* Restore CPSR from SPSR. */
d9ba4830 10462 tmp = load_cpu_field(spsr);
e69ad9df
AL
10463 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10464 gen_io_start();
10465 }
235ea1f5 10466 gen_helper_cpsr_write_eret(cpu_env, tmp);
e69ad9df
AL
10467 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
10468 gen_io_end();
10469 }
7d1b0095 10470 tcg_temp_free_i32(tmp);
b29fd33d 10471 /* Must exit loop to check un-masked IRQs */
dcba3a8d 10472 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
10473 }
10474 }
10475 break;
10476 case 0xa:
10477 case 0xb:
10478 {
10479 int32_t offset;
10480
10481 /* branch (and link) */
10482 val = (int32_t)s->pc;
10483 if (insn & (1 << 24)) {
7d1b0095 10484 tmp = tcg_temp_new_i32();
5e3f878a
PB
10485 tcg_gen_movi_i32(tmp, val);
10486 store_reg(s, 14, tmp);
9ee6e8bb 10487 }
534df156
PM
10488 offset = sextract32(insn << 2, 0, 26);
10489 val += offset + 4;
9ee6e8bb
PB
10490 gen_jmp(s, val);
10491 }
10492 break;
10493 case 0xc:
10494 case 0xd:
10495 case 0xe:
6a57f3eb
WN
10496 if (((insn >> 8) & 0xe) == 10) {
10497 /* VFP. */
7dcc1f89 10498 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10499 goto illegal_op;
10500 }
7dcc1f89 10501 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 10502 /* Coprocessor. */
9ee6e8bb 10503 goto illegal_op;
6a57f3eb 10504 }
9ee6e8bb
PB
10505 break;
10506 case 0xf:
10507 /* swi */
eaed129d 10508 gen_set_pc_im(s, s->pc);
d4a2dc67 10509 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 10510 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
10511 break;
10512 default:
10513 illegal_op:
73710361
GB
10514 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
10515 default_exception_el(s));
9ee6e8bb
PB
10516 break;
10517 }
10518 }
10519}
10520
296e5a0a
PM
10521static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
10522{
10523 /* Return true if this is a 16 bit instruction. We must be precise
10524 * about this (matching the decode). We assume that s->pc still
10525 * points to the first 16 bits of the insn.
10526 */
10527 if ((insn >> 11) < 0x1d) {
10528 /* Definitely a 16-bit instruction */
10529 return true;
10530 }
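 /* Values 0x00..0x1c of the top five bits cover all purely 16-bit
  * encodings, e.g. BX lr (0x4770) has insn >> 11 == 0x08.
  */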
10531
10532 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
10533 * first half of a 32-bit Thumb insn. Thumb-1 cores might
10534 * end up actually treating this as two 16-bit insns, though,
10535 * if it's half of a bl/blx pair that might span a page boundary.
10536 */
14120108
JS
10537 if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
10538 arm_dc_feature(s, ARM_FEATURE_M)) {
296e5a0a
PM
10539 /* Thumb2 cores (including all M profile ones) always treat
10540 * 32-bit insns as 32-bit.
10541 */
10542 return false;
10543 }
10544
bfe7ad5b 10545 if ((insn >> 11) == 0x1e && s->pc - s->page_start < TARGET_PAGE_SIZE - 3) {
296e5a0a
PM
10546 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
10547 * is not on the next page; we merge this into a 32-bit
10548 * insn.
10549 */
10550 return false;
10551 }
10552 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
10553 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
10554 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
10555 * -- handle as single 16 bit insn
10556 */
10557 return true;
10558}
10559
9ee6e8bb
PB
10560/* Return true if this is a Thumb-2 logical op. */
10561static int
10562thumb2_logic_op(int op)
10563{
10564 return (op < 8);
10565}
10566
10567/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
10568 then set condition code flags based on the result of the operation.
10569 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
10570 to the high bit of T1.
10571 Returns zero if the opcode is valid. */
10572
10573static int
39d5492a
PM
10574gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
10575 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
10576{
10577 int logic_cc;
10578
10579 logic_cc = 0;
10580 switch (op) {
10581 case 0: /* and */
396e467c 10582 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
10583 logic_cc = conds;
10584 break;
10585 case 1: /* bic */
f669df27 10586 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
10587 logic_cc = conds;
10588 break;
10589 case 2: /* orr */
396e467c 10590 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
10591 logic_cc = conds;
10592 break;
10593 case 3: /* orn */
29501f1b 10594 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
10595 logic_cc = conds;
10596 break;
10597 case 4: /* eor */
396e467c 10598 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
10599 logic_cc = conds;
10600 break;
10601 case 8: /* add */
10602 if (conds)
72485ec4 10603 gen_add_CC(t0, t0, t1);
9ee6e8bb 10604 else
396e467c 10605 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
10606 break;
10607 case 10: /* adc */
10608 if (conds)
49b4c31e 10609 gen_adc_CC(t0, t0, t1);
9ee6e8bb 10610 else
396e467c 10611 gen_adc(t0, t1);
9ee6e8bb
PB
10612 break;
10613 case 11: /* sbc */
2de68a49
RH
10614 if (conds) {
10615 gen_sbc_CC(t0, t0, t1);
10616 } else {
396e467c 10617 gen_sub_carry(t0, t0, t1);
2de68a49 10618 }
9ee6e8bb
PB
10619 break;
10620 case 13: /* sub */
10621 if (conds)
72485ec4 10622 gen_sub_CC(t0, t0, t1);
9ee6e8bb 10623 else
396e467c 10624 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
10625 break;
10626 case 14: /* rsb */
10627 if (conds)
72485ec4 10628 gen_sub_CC(t0, t1, t0);
9ee6e8bb 10629 else
396e467c 10630 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
10631 break;
10632 default: /* 5, 6, 7, 9, 12, 15. */
10633 return 1;
10634 }
10635 if (logic_cc) {
396e467c 10636 gen_logic_CC(t0);
9ee6e8bb 10637 if (shifter_out)
396e467c 10638 gen_set_CF_bit31(t1);
9ee6e8bb
PB
10639 }
10640 return 0;
10641}
10642
2eea841c
PM
10643/* Translate a 32-bit thumb instruction. */
10644static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 10645{
296e5a0a 10646 uint32_t imm, shift, offset;
9ee6e8bb 10647 uint32_t rd, rn, rm, rs;
39d5492a
PM
10648 TCGv_i32 tmp;
10649 TCGv_i32 tmp2;
10650 TCGv_i32 tmp3;
10651 TCGv_i32 addr;
a7812ae4 10652 TCGv_i64 tmp64;
9ee6e8bb
PB
10653 int op;
10654 int shiftop;
10655 int conds;
10656 int logic_cc;
10657
14120108
JS
10658 /*
10659 * ARMv6-M supports a limited subset of Thumb2 instructions.
10660 * Other Thumb1 architectures allow only 32-bit
10661 * combined BL/BLX prefix and suffix.
296e5a0a 10662 */
14120108
JS
10663 if (arm_dc_feature(s, ARM_FEATURE_M) &&
10664 !arm_dc_feature(s, ARM_FEATURE_V7)) {
10665 int i;
10666 bool found = false;
8297cb13
JS
10667 static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
10668 0xf3b08040 /* dsb */,
10669 0xf3b08050 /* dmb */,
10670 0xf3b08060 /* isb */,
10671 0xf3e08000 /* mrs */,
10672 0xf000d000 /* bl */};
10673 static const uint32_t armv6m_mask[] = {0xffe0d000,
10674 0xfff0d0f0,
10675 0xfff0d0f0,
10676 0xfff0d0f0,
10677 0xffe0d000,
10678 0xf800d000};
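 /* Each candidate matches when (insn & mask) == value; e.g. DSB SY
  * (0xf3bf8f4f) satisfies the dsb entry: 0xf3bf8f4f & 0xfff0d0f0
  * == 0xf3b08040.
  */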
14120108
JS
10679
10680 for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
10681 if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
10682 found = true;
10683 break;
10684 }
10685 }
10686 if (!found) {
10687 goto illegal_op;
10688 }
10689 } else if ((insn & 0xf800e800) != 0xf000e800) {
9ee6e8bb
PB
10690 ARCH(6T2);
10691 }
10692
10693 rn = (insn >> 16) & 0xf;
10694 rs = (insn >> 12) & 0xf;
10695 rd = (insn >> 8) & 0xf;
10696 rm = insn & 0xf;
10697 switch ((insn >> 25) & 0xf) {
10698 case 0: case 1: case 2: case 3:
10699 /* 16-bit instructions. Should never happen. */
10700 abort();
10701 case 4:
10702 if (insn & (1 << 22)) {
ebfe27c5
PM
10703 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
10704 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 10705 * table branch, TT.
ebfe27c5 10706 */
76eff04d
PM
10707 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
10708 arm_dc_feature(s, ARM_FEATURE_V8)) {
10709 /* 0b1110_1001_0111_1111_1110_1001_0111_111
10710 * - SG (v8M only)
10711 * The bulk of the behaviour for this instruction is implemented
10712 * in v7m_handle_execute_nsc(), which deals with the insn when
10713 * it is executed by a CPU in non-secure state from memory
10714 * which is Secure & NonSecure-Callable.
10715 * Here we only need to handle the remaining cases:
10716 * * in NS memory (including the "security extension not
10717 * implemented" case) : NOP
10718 * * in S memory but CPU already secure (clear IT bits)
10719 * We know that the attribute for the memory this insn is
10720 * in must match the current CPU state, because otherwise
10721 * get_phys_addr_pmsav8 would have generated an exception.
10722 */
10723 if (s->v8m_secure) {
10724 /* Like the IT insn, we don't need to generate any code */
10725 s->condexec_cond = 0;
10726 s->condexec_mask = 0;
10727 }
10728 } else if (insn & 0x01200000) {
ebfe27c5
PM
10729 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10730 * - load/store dual (post-indexed)
10731 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
10732 * - load/store dual (literal and immediate)
10733 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
10734 * - load/store dual (pre-indexed)
10735 */
910d7692
PM
10736 bool wback = extract32(insn, 21, 1);
10737
9ee6e8bb 10738 if (rn == 15) {
ebfe27c5
PM
10739 if (insn & (1 << 21)) {
10740 /* UNPREDICTABLE */
10741 goto illegal_op;
10742 }
7d1b0095 10743 addr = tcg_temp_new_i32();
b0109805 10744 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 10745 } else {
b0109805 10746 addr = load_reg(s, rn);
9ee6e8bb
PB
10747 }
10748 offset = (insn & 0xff) * 4;
910d7692 10749 if ((insn & (1 << 23)) == 0) {
9ee6e8bb 10750 offset = -offset;
910d7692
PM
10751 }
10752
10753 if (s->v8m_stackcheck && rn == 13 && wback) {
10754 /*
10755 * Here 'addr' is the current SP; if offset is +ve we're
10756 * moving SP up, else down. It is UNKNOWN whether the limit
10757 * check triggers when SP starts below the limit and ends
10758 * up above it; check whichever of the current and final
10759 * SP is lower, so QEMU will trigger in that situation.
10760 */
10761 if ((int32_t)offset < 0) {
10762 TCGv_i32 newsp = tcg_temp_new_i32();
10763
10764 tcg_gen_addi_i32(newsp, addr, offset);
10765 gen_helper_v8m_stackcheck(cpu_env, newsp);
10766 tcg_temp_free_i32(newsp);
10767 } else {
10768 gen_helper_v8m_stackcheck(cpu_env, addr);
10769 }
10770 }
10771
9ee6e8bb 10772 if (insn & (1 << 24)) {
b0109805 10773 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
10774 offset = 0;
10775 }
10776 if (insn & (1 << 20)) {
10777 /* ldrd */
e2592fad 10778 tmp = tcg_temp_new_i32();
12dcc321 10779 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
10780 store_reg(s, rs, tmp);
10781 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10782 tmp = tcg_temp_new_i32();
12dcc321 10783 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10784 store_reg(s, rd, tmp);
9ee6e8bb
PB
10785 } else {
10786 /* strd */
b0109805 10787 tmp = load_reg(s, rs);
12dcc321 10788 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10789 tcg_temp_free_i32(tmp);
b0109805
PB
10790 tcg_gen_addi_i32(addr, addr, 4);
10791 tmp = load_reg(s, rd);
12dcc321 10792 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10793 tcg_temp_free_i32(tmp);
9ee6e8bb 10794 }
                if (wback) {
                    /* Base writeback.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
                 *  - load/store exclusive word
                 *  - TT (v8M only)
                 */
                if (rs == 15) {
                    if (!(insn & (1 << 20)) &&
                        arm_dc_feature(s, ARM_FEATURE_M) &&
                        arm_dc_feature(s, ARM_FEATURE_V8)) {
                        /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
                         *  - TT (v8M only)
                         */
                        bool alt = insn & (1 << 7);
                        TCGv_i32 addr, op, ttresp;

                        if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
                            /* we UNDEF for these UNPREDICTABLE cases */
                            goto illegal_op;
                        }

                        if (alt && !s->v8m_secure) {
                            goto illegal_op;
                        }

                        addr = load_reg(s, rn);
                        op = tcg_const_i32(extract32(insn, 6, 2));
                        ttresp = tcg_temp_new_i32();
                        gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
                        tcg_temp_free_i32(addr);
                        tcg_temp_free_i32(op);
                        store_reg(s, rd, ttresp);
                        break;
                    }
                    goto illegal_op;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                }
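                /*
                 * The loaded table entry is a halfword count: double it and
                 * add it to the PC (here the address 4 bytes past this insn)
                 * to form the branch target, so e.g. an entry of 3 branches
                 * 6 bytes forward of that PC.
                 */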
                tcg_temp_free_i32(addr);
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                bool is_lasr = false;
                bool is_ld = extract32(insn, 20, 1);
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    is_lasr = true;
                    break;
                }

                if (is_lasr && !is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
                }
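                /*
                 * Note the barrier placement: a store-release must order
                 * all earlier accesses before the store, so its barrier is
                 * emitted above, before the store; the matching
                 * load-acquire barrier is emitted after the load, at the
                 * end of this block.
                 */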

                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    if (is_ld) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                               rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
                                             rs | ISSIsAcqRel);
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        case 2: /* stl */
                            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
                                              rs | ISSIsAcqRel);
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (is_ld) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);

                if (is_lasr && is_ld) {
                    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
                }
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                bool wback = extract32(insn, 21, 1);
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
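                /* offset is now 4 bytes per set bit in the register list,
                 * i.e. the total size of the transfer.
                 */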

                if (insn & (1 << 24)) {
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                if (s->v8m_stackcheck && rn == 13 && wback) {
                    /*
                     * If the writeback is incrementing SP rather than
                     * decrementing it, and the initial SP is below the
                     * stack limit but the final written-back SP would
                     * be above, then we must not perform any memory
                     * accesses, but it is IMPDEF whether we generate
                     * an exception. We choose to do so in this case.
                     * At this point 'addr' is the lowest address, so
                     * either the original SP (if incrementing) or our
                     * final SP (if decrementing), so that's what we check.
                     */
                    gen_helper_v8m_stackcheck(cpu_env, addr);
                }

                loaded_var = NULL;
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        if (i == 15) {
                            gen_bx_excret(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (wback) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:

        op = (insn >> 21) & 0xf;
        if (op == 6) {
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
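            /*
             * Either way one temp now holds the top halfword and the other
             * the bottom halfword, so the OR below assembles the packed
             * result: PKHBT takes the bottom half of Rn and the top half of
             * the shifted Rm, PKHTB the reverse.
             */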
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd == 13 &&
                ((op == 2 && rn == 15) ||
                 (op == 8 && rn == 13) ||
                 (op == 13 && rn == 13))) {
                /* MOV SP, ... or ADD SP, SP, ... or SUB SP, SP, ... */
                store_sp_checked(s, tmp);
            } else if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            /*
             * 0b1111_1010_0xxx_xxxx_1111_xxxx_0000_xxxx:
             *  - MOV, MOVS (register-shifted register), flagsetting
             */
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: /* SXTAH, SXTH */
            case 1: /* UXTAH, UXTH */
            case 4: /* SXTAB, SXTB */
            case 5: /* UXTAB, UXTB */
                break;
            case 2: /* SXTAB16, SXTB16 */
            case 3: /* UXTAB16, UXTB16 */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            default:
                goto illegal_op;
            }
            if (rn != 15) {
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
            }
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default:
                g_assert_not_reached();
            }
            if (rn != 15) {
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                goto illegal_op;
            }
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                switch (op) {
                case 0x0a: /* rbit */
                case 0x08: /* rev */
                case 0x09: /* rev16 */
                case 0x0b: /* revsh */
                case 0x18: /* clz */
                    break;
                case 0x10: /* sel */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        goto illegal_op;
                    }
                    break;
                case 0x20: /* crc32/crc32c */
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                    if (!dc_isar_feature(aa32_crc32, s)) {
                        goto illegal_op;
                    }
                    break;
                default:
                    goto illegal_op;
                }
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    tcg_gen_clzi_i32(tmp, tmp, 32);
                    break;
                case 0x20:
                case 0x21:
                case 0x22:
                case 0x28:
                case 0x29:
                case 0x2a:
                {
                    /* crc32/crc32c */
                    uint32_t sz = op & 0x3;
                    uint32_t c = op & 0x8;

                    tmp2 = load_reg(s, rm);
                    if (sz == 0) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xff);
                    } else if (sz == 1) {
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
                    }
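                    /*
                     * The helper is told the operand width in bytes via
                     * tmp3 below; the masking above ensures the byte and
                     * halfword forms only feed their low bits into the CRC.
                     */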
                    tmp3 = tcg_const_i32(1 << sz);
                    if (c) {
                        gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
                    } else {
                        gen_helper_crc32(tmp, tmp, tmp2, tmp3);
                    }
                    tcg_temp_free_i32(tmp2);
                    tcg_temp_free_i32(tmp3);
                    break;
                }
                default:
                    g_assert_not_reached();
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
            case 7: /* Unsigned sum of absolute differences.  */
                break;
            case 1: /* 16 x 16 -> 32 */
            case 2: /* Dual multiply add.  */
            case 3: /* 32 * 16 -> 32msb */
            case 4: /* Dual multiply subtract.  */
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    goto illegal_op;
                }
                break;
            }
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow.  */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
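                /*
                 * Bit 4 is the R (round) bit: adding 0x80000000 before
                 * taking the high word below rounds the result to nearest
                 * rather than truncating (the SMMULR/SMMLAR/SMMLSR forms).
                 */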
                if (insn & (1 << 4)) {
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!dc_isar_feature(thumb_div, s)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                    tcg_temp_free_i32(tmp);
                    tcg_temp_free_i32(tmp2);
                    goto illegal_op;
                }
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                            tcg_temp_free_i32(tmp2);
                            tcg_temp_free_i32(tmp);
                            goto illegal_op;
                        }
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                        tcg_temp_free_i64(tmp64);
                        goto illegal_op;
                    }
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /* We don't currently implement M profile FP support,
             * so this entire space should give a NOCP fault, with
             * the exception of the v8M VLLDM and VLSTM insns, which
             * must be NOPs in Secure state and UNDEF in Nonsecure state.
             */
            if (arm_dc_feature(s, ARM_FEATURE_V8) &&
                (insn & 0xffa00f00) == 0xec200a00) {
                /* 0b1110_1100_0x1x_xxxx_xxxx_1010_xxxx_xxxx
                 * - VLLDM, VLSTM
                 * We choose to UNDEF if the RAZ bits are non-zero.
                 */
                if (!s->v8m_secure || (insn & 0x0040f0ff)) {
                    goto illegal_op;
                }
                /* Just NOP since FP support is not implemented */
                break;
            }
            /* All other insns: NOCP */
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               default_exception_el(s));
            break;
        }
        if ((insn & 0xfe000a00) == 0xfc000800
            && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_3same_ext(s, insn)) {
                goto illegal_op;
            }
        } else if ((insn & 0xff000a00) == 0xfe000800
                   && arm_dc_feature(s, ARM_FEATURE_V8)) {
            /* The Thumb2 and ARM encodings are identical.  */
            if (disas_neon_insn_2reg_scalar_ext(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(s, insn)) {
                goto illegal_op;
            }
        } else if (((insn >> 8) & 0xe) == 10) {
            if (disas_vfp_insn(s, insn)) {
                goto illegal_op;
            }
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn(s, insn)) {
                goto illegal_op;
            }
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;
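                /*
                 * The two XORs above compute the architected
                 * I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S) bits and fold
                 * them into offset[23] and offset[22].
                 */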

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    if (arm_dc_feature(s, ARM_FEATURE_M)) {
                        goto illegal_op;
                    }
                    if (!(insn & (1 << 20))) {
                        /* Hypervisor call (v7) */
                        int imm16 = extract32(insn, 16, 4) << 12
                            | extract32(insn, 0, 12);
                        ARCH(7);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_hvc(s, imm16);
                    } else {
                        /* Secure monitor call (v6+) */
                        ARCH(6K);
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        gen_smc(s);
                    }
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            tmp = load_reg(s, rn);
                            /* the constant is the mask and SYSm fields */
                            addr = tcg_const_i32(insn & 0xfff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 5, 1)) {
                            /* MSR (banked) */
                            int sysm = extract32(insn, 8, 4) |
                                (extract32(insn, 4, 1) << 4);
                            int r = op & 1;

                            gen_msr_banked(s, r, sysm, rm);
                            break;
                        }

                        /* MSR (for PSRs) */
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                        msr_mask(s, (insn >> 8) & 0xf, op == 1),
                                        op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        if (!arm_dc_feature(s, ARM_FEATURE_V7) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
                            break;
                        case 6: /* isb */
                            /* We need to break the TB after this insn
                             * to execute self-modifying code correctly
                             * and also to take any pending interrupts
                             * immediately.
                             */
                            gen_goto_tb(s, 0, s->pc & ~1);
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.
                         * This instruction doesn't exist at all for M-profile.
                         */
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        if (s->current_el == 2) {
                            /* ERET from Hyp uses ELR_Hyp, not LR */
                            if (insn & 0xff) {
                                goto illegal_op;
                            }
                            tmp = load_cpu_field(elr_el[2]);
                        } else {
                            tmp = load_reg(s, rn);
                            tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        }
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 0, sysm, rd);
                            break;
                        }

                        if (extract32(insn, 16, 4) != 0xf) {
                            goto illegal_op;
                        }
                        if (!arm_dc_feature(s, ARM_FEATURE_M) &&
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        /* mrs cpsr */
                        tmp = tcg_temp_new_i32();
                        if (arm_dc_feature(s, ARM_FEATURE_M)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* MRS */
                        if (extract32(insn, 5, 1) &&
                            !arm_dc_feature(s, ARM_FEATURE_M)) {
                            /* MRS (banked) */
                            int sysm = extract32(insn, 16, 4) |
                                (extract32(insn, 4, 1) << 4);

                            gen_mrs_banked(s, 1, sysm, rd);
                            break;
                        }

                        /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
                            goto illegal_op;
                        }

                        if (extract32(insn, 16, 4) != 0xf ||
                            extract32(insn, 0, 8) != 0) {
                            goto illegal_op;
                        }

                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                arm_skip_unless(s, op);

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /*
             * 0b1111_0xxx_xxxx_0xxx_xxxx_xxxx
             *  - Data-processing (modified immediate, plain binary immediate)
             */
            if (insn & (1 << 25)) {
                /*
                 * 0b1111_0x1x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (plain binary immediate)
                 */
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_sextract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32) {
                            tcg_gen_extract_i32(tmp, tmp, shift, imm);
                        }
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                            }
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0) {
                                if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
                                    tcg_temp_free_i32(tmp);
                                    tcg_temp_free_i32(tmp2);
                                    goto illegal_op;
                                }
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            } else {
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                            }
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                        | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                        store_reg(s, rd, tmp);
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                            store_reg(s, rd, tmp);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                            if (rn == 13 && rd == 13) {
                                /* ADD SP, SP, imm or SUB SP, SP, imm */
                                store_sp_checked(s, tmp);
                            } else {
                                store_reg(s, rd, tmp);
                            }
                        }
                    }
                }
            } else {
                /*
                 * 0b1111_0x0x_xxxx_0xxx_xxxx_xxxx
                 *  - Data-processing (modified immediate)
                 */
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    shifter_out = 1;
                    break;
                }
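                /*
                 * e.g. a 12-bit field of 0x455 (shift = 4, imm8 = 0x55)
                 * selects the rotated form: the top bit of imm8 moves into
                 * the rotation amount, the implied leading 1 is OR-ed in
                 * (imm |= 0x80), and rotating 0xd5 right by 8 gives the
                 * constant 0xd5000000.
                 */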
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd == 13 && rn == 13
                    && (op == 8 || op == 13)) {
                    /* ADD(S) SP, SP, imm or SUB(S) SP, SP, imm */
                    store_sp_checked(s, tmp);
                } else if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
    {
        int postinc = 0;
        int writeback = 0;
        int memidx;
        ISSInfo issinfo;

        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(s, insn)) {
                goto illegal_op;
            }
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15: memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI, which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return;
                }
                if (op1 & 1) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                goto illegal_op;
            }
        }
        memidx = get_mem_index(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    memidx = get_a32_user_mem_index(s);
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }

        issinfo = writeback ? ISSInvalid : rs;

        if (s->v8m_stackcheck && rn == 13 && writeback) {
            /*
             * Stackcheck. Here we know 'addr' is the current SP;
             * if imm is +ve we're moving SP up, else down. It is
             * UNKNOWN whether the limit check triggers when SP starts
             * below the limit and ends up above it; we choose to do so.
             */
            if ((int32_t)imm < 0) {
                TCGv_i32 newsp = tcg_temp_new_i32();

                tcg_gen_addi_i32(newsp, addr, imm);
                gen_helper_v8m_stackcheck(cpu_env, newsp);
                tcg_temp_free_i32(newsp);
            } else {
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }
        }

        if (writeback && !postinc) {
            tcg_gen_addi_i32(addr, addr, imm);
        }

        if (insn & (1 << 20)) {
            /* Load.  */
            tmp = tcg_temp_new_i32();
            switch (op) {
            case 0:
                gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 4:
                gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 5:
                gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                gen_bx_excret(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0:
                gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 1:
                gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
                break;
            case 2:
                gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            tcg_temp_free_i32(tmp);
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
    }
    break;
    default:
        goto illegal_op;
    }
    return;
illegal_op:
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    uint32_t val, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /*
             * 0b0001_1xxx_xxxx_xxxx
             *  - Add, subtract (three low registers)
             *  - Add, subtract (two low registers and immediate)
             */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /*
         * 0b001x_xxxx_xxxx_xxxx
         *  - Add, subtract, compare, move (one low register and immediate)
         */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                               rd | ISSIs16Bit);
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* 0b0100_01xx_xxxx_xxxx
             * - data processing extended, branch and exchange
             */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rd == 13) {
                    /* ADD SP, SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                if (rd == 13) {
                    /* MOV SP, reg */
                    store_sp_checked(s, tmp);
                } else {
                    store_reg(s, rd, tmp);
                }
                break;
            case 3:
            {
                /* 0b0100_0111_xxxx_xxxx
                 * - branch [and link] exchange thumb register
                 */
                bool link = insn & (1 << 7);

                if (insn & 3) {
                    goto undef;
                }
                if (link) {
                    ARCH(5);
                }
                if ((insn & 4)) {
                    /* BXNS/BLXNS: only exists for v8M with the
                     * security extensions, and always UNDEF if NonSecure.
                     * We don't implement these in the user-only mode
                     * either (in theory you can use them from Secure User
                     * mode but they are too tied in to system emulation.)
                     */
                    if (!s->v8m_secure || IS_USER_ONLY) {
                        goto undef;
                    }
                    if (link) {
                        gen_blxns(s, rm);
                    } else {
                        gen_bxns(s, rm);
                    }
                    break;
                }
                /* BLX/BX */
                tmp = load_reg(s, rm);
                if (link) {
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                    gen_bx(s, tmp);
                } else {
                    /* Only BX works as exception-return, not BLX */
                    gen_bx_excret(s, tmp);
                }
                break;
            }
            }
            break;
        }

        /*
         * 0b0100_00xx_xxxx_xxxx
         *  - Data-processing (two low registers)
         */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }
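        /*
         * After the swap, rd names the shift-amount register and rm the
         * value register; val == 1 records that the result ends up in tmp2
         * and must be written back to rm rather than rd.
         */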

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            tmp = NULL;
        }
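        /*
         * tmp stays NULL only for MVN (op 0xf), the one operation below
         * that works purely on tmp2; NEG instead synthesises a zero first
         * operand so it can share the subtract path.
         */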
99c475ab 12415
396e467c 12416 tmp2 = load_reg(s, rm);
5899f386 12417 switch (op) {
99c475ab 12418 case 0x0: /* and */
396e467c 12419 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 12420 if (!s->condexec_mask)
396e467c 12421 gen_logic_CC(tmp);
99c475ab
FB
12422 break;
12423 case 0x1: /* eor */
396e467c 12424 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 12425 if (!s->condexec_mask)
396e467c 12426 gen_logic_CC(tmp);
99c475ab
FB
12427 break;
12428 case 0x2: /* lsl */
9ee6e8bb 12429 if (s->condexec_mask) {
365af80e 12430 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 12431 } else {
9ef39277 12432 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12433 gen_logic_CC(tmp2);
9ee6e8bb 12434 }
99c475ab
FB
12435 break;
12436 case 0x3: /* lsr */
9ee6e8bb 12437 if (s->condexec_mask) {
365af80e 12438 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 12439 } else {
9ef39277 12440 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12441 gen_logic_CC(tmp2);
9ee6e8bb 12442 }
99c475ab
FB
12443 break;
12444 case 0x4: /* asr */
9ee6e8bb 12445 if (s->condexec_mask) {
365af80e 12446 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 12447 } else {
9ef39277 12448 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12449 gen_logic_CC(tmp2);
9ee6e8bb 12450 }
99c475ab
FB
12451 break;
12452 case 0x5: /* adc */
49b4c31e 12453 if (s->condexec_mask) {
396e467c 12454 gen_adc(tmp, tmp2);
49b4c31e
RH
12455 } else {
12456 gen_adc_CC(tmp, tmp, tmp2);
12457 }
99c475ab
FB
12458 break;
12459 case 0x6: /* sbc */
2de68a49 12460 if (s->condexec_mask) {
396e467c 12461 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
12462 } else {
12463 gen_sbc_CC(tmp, tmp, tmp2);
12464 }
99c475ab
FB
12465 break;
12466 case 0x7: /* ror */
9ee6e8bb 12467 if (s->condexec_mask) {
f669df27
AJ
12468 tcg_gen_andi_i32(tmp, tmp, 0x1f);
12469 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 12470 } else {
9ef39277 12471 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 12472 gen_logic_CC(tmp2);
9ee6e8bb 12473 }
99c475ab
FB
12474 break;
12475 case 0x8: /* tst */
396e467c
FN
12476 tcg_gen_and_i32(tmp, tmp, tmp2);
12477 gen_logic_CC(tmp);
99c475ab 12478 rd = 16;
5899f386 12479 break;
99c475ab 12480 case 0x9: /* neg */
9ee6e8bb 12481 if (s->condexec_mask)
396e467c 12482 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 12483 else
72485ec4 12484 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12485 break;
12486 case 0xa: /* cmp */
72485ec4 12487 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
12488 rd = 16;
12489 break;
12490 case 0xb: /* cmn */
72485ec4 12491 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
12492 rd = 16;
12493 break;
12494 case 0xc: /* orr */
396e467c 12495 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 12496 if (!s->condexec_mask)
396e467c 12497 gen_logic_CC(tmp);
99c475ab
FB
12498 break;
12499 case 0xd: /* mul */
7b2919a0 12500 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 12501 if (!s->condexec_mask)
396e467c 12502 gen_logic_CC(tmp);
99c475ab
FB
12503 break;
12504 case 0xe: /* bic */
f669df27 12505 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 12506 if (!s->condexec_mask)
396e467c 12507 gen_logic_CC(tmp);
99c475ab
FB
12508 break;
12509 case 0xf: /* mvn */
396e467c 12510 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 12511 if (!s->condexec_mask)
396e467c 12512 gen_logic_CC(tmp2);
99c475ab 12513 val = 1;
5899f386 12514 rm = rd;
99c475ab
FB
12515 break;
12516 }
12517 if (rd != 16) {
396e467c
FN
12518 if (val) {
12519 store_reg(s, rm, tmp2);
12520 if (op != 0xf)
7d1b0095 12521 tcg_temp_free_i32(tmp);
396e467c
FN
12522 } else {
12523 store_reg(s, rd, tmp);
7d1b0095 12524 tcg_temp_free_i32(tmp2);
396e467c
FN
12525 }
12526 } else {
7d1b0095
PM
12527 tcg_temp_free_i32(tmp);
12528 tcg_temp_free_i32(tmp2);
99c475ab
FB
12529 }
12530 break;
12531
12532 case 5:
12533 /* load/store register offset. */
12534 rd = insn & 7;
12535 rn = (insn >> 3) & 7;
12536 rm = (insn >> 6) & 7;
12537 op = (insn >> 9) & 7;
b0109805 12538 addr = load_reg(s, rn);
b26eefb6 12539 tmp = load_reg(s, rm);
b0109805 12540 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 12541 tcg_temp_free_i32(tmp);
99c475ab 12542
c40c8556 12543 if (op < 3) { /* store */
b0109805 12544 tmp = load_reg(s, rd);
c40c8556
PM
12545 } else {
12546 tmp = tcg_temp_new_i32();
12547 }
99c475ab
FB
12548
12549 switch (op) {
12550 case 0: /* str */
9bb6558a 12551 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12552 break;
12553 case 1: /* strh */
9bb6558a 12554 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12555 break;
12556 case 2: /* strb */
9bb6558a 12557 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12558 break;
12559 case 3: /* ldrsb */
9bb6558a 12560 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12561 break;
12562 case 4: /* ldr */
9bb6558a 12563 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12564 break;
12565 case 5: /* ldrh */
9bb6558a 12566 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12567 break;
12568 case 6: /* ldrb */
9bb6558a 12569 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12570 break;
12571 case 7: /* ldrsh */
9bb6558a 12572 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
12573 break;
12574 }
c40c8556 12575 if (op >= 3) { /* load */
b0109805 12576 store_reg(s, rd, tmp);
c40c8556
PM
12577 } else {
12578 tcg_temp_free_i32(tmp);
12579 }
7d1b0095 12580 tcg_temp_free_i32(addr);
99c475ab
FB
12581 break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /*
         * 0b1010_xxxx_xxxx_xxxx
         *  - Add PC/SP (immediate)
         */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;
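    /*
     * Worked example for the PC-relative case above: s->pc already
     * points past this 16-bit insn, so s->pc + 2 is the architectural
     * PC (insn address + 4), and clearing bit 1 word-aligns it as ADR
     * requires. For "add r0, pc, #8" at address 0x8002, that is
     * (0x8004 + 2) & ~2 = 0x8004, giving r0 = 0x800c.
     */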

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /*
             * 0b1011_0000_xxxx_xxxx
             *  - ADD (SP plus immediate)
             *  - SUB (SP minus immediate)
             */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_sp_checked(s, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /*
             * 0b1011_x10x_xxxx_xxxx
             *  - push/pop
             */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }

            if (s->v8m_stackcheck) {
                /*
                 * Here 'addr' is the lower of "old SP" and "new SP";
                 * if this is a pop that starts below the limit and ends
                 * above it, it is UNKNOWN whether the limit check triggers;
                 * we choose to trigger.
                 */
                gen_helper_v8m_stackcheck(cpu_env, addr);
            }

            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            tmp = NULL;
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;
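        /*
         * Worked example for the push/pop case above (illustrative
         * only): "push {r4-r7, lr}" is 0xb5f0, so the register-list
         * byte contributes 4 * 4 bytes and the R bit (bit 8) another 4,
         * giving offset = 20; SP is dropped by 20 up front and the
         * registers are then stored at ascending addresses.
         */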

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            arm_gen_condlabel(s);
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;
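        /*
         * The offset line above reassembles the CBZ/CBNZ immediate,
         * i:imm5:'0': bits 7..3 (imm5) shifted up one, plus bit 9 (i)
         * as bit 6, for a forward branch of 0..126 bytes from PC.
         */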

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_bkpt_insn(s, 2, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev, and hlt */
        {
            int op1 = extract32(insn, 6, 2);

            if (op1 == 2) {
                /* HLT */
                int imm6 = extract32(insn, 0, 6);

                gen_hlt(s, imm6);
                break;
            }

            /* Otherwise this is rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch (op1) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default:
                g_assert_not_reached();
            }
            store_reg(s, rd, tmp);
            break;
        }

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
                    gen_helper_setend(cpu_env);
                    s->base.is_jmp = DISAS_UPDATE;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var = NULL;
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->base.is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        arm_skip_unless(s, cond);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;
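    /*
     * Worked example for the branch above: "((int32_t)insn << 24) >> 24"
     * sign-extends the low byte, and the target is the architectural PC
     * (s->pc + 2) plus twice that. For "beq ." encoded as 0xd0fe at
     * address 0x8000, offset = -2 and val = 0x8004 - 4 = 0x8000, i.e. a
     * branch to itself.
     */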

    case 14:
        if (insn & (1 << 11)) {
            /* thumb_insn_is_16bit() ensures we can't get here for
             * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
             * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
             */
            assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
            ARCH(5);
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* thumb_insn_is_16bit() ensures we can't get here for
         * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
         */
        assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));

        if (insn & (1 << 11)) {
            /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
        } else {
            /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
            uint32_t uoffset = ((int32_t)insn << 21) >> 9;

            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
        }
        break;
    }
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
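
/*
 * Illustrative sketch of the split thumb1 BL handled above: the
 * 0b11110 prefix banks PC + 4 plus the sign-extended high immediate in
 * LR, and the 0b11111 suffix adds the low immediate. This standalone
 * model (the name and form are ours, not the translator's) computes
 * the final branch target from the two halfwords.
 */
static inline uint32_t sketch_thumb1_bl_target(uint32_t prefix_addr,
                                               uint16_t prefix,
                                               uint16_t suffix)
{
    /* prefix: LR := addr + 4 + (SignExtend(imm11) << 12) */
    uint32_t lr = prefix_addr + 4
                + (uint32_t)(((int32_t)((uint32_t)prefix << 21)) >> 9);
    /* suffix: target := LR + (imm11 << 1); bit 0 becomes the Thumb bit */
    return lr + ((suffix & 0x7ffu) << 1);
}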

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->pc might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->pc is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);

    return !thumb_insn_is_16bit(s, insn);
}
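
/*
 * A minimal sketch (assuming a v7+ core; the real thumb_insn_is_16bit()
 * also has to treat the BL/BLX prefix as a separate 16-bit insn on
 * pre-Thumb2 CPUs) of the width test used above: a halfword whose top
 * five bits are 0b11101, 0b11110 or 0b11111 opens a 32-bit encoding.
 */
static inline bool sketch_first_halfword_is_32bit(uint16_t hw)
{
    return (hw & 0xf800u) >= 0xe800u;
}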

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
    dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);
    dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
    dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
    dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
    dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
    dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_A32, HANDLER);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_A32, STACKCHECK);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1.  */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
}
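
/*
 * Two reference notes on the setup above (sketches, not translator
 * code): FIELD_EX32() is a registerfields.h wrapper around extract32(),
 * which behaves like the function below; and the "bound" arithmetic
 * relies on -(pc | TARGET_PAGE_MASK) being the number of bytes left on
 * the page, e.g. pc_first = 0x1ff0 with 4K pages gives
 * -(0xfffffff0) = 0x10, i.e. a bound of 4 fixed-length insns.
 */
static inline uint32_t sketch_extract32(uint32_t value, int start, int length)
{
    /* assumes 0 < length <= 32 and start + length <= 32 */
    return (value >> start) & (~0u >> (32 - length));
}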

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}
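
/*
 * For reference, a standalone model (ours, mirroring the advance logic
 * in thumb_tr_translate_insn() below) of how the IT state described
 * above steps after each insn: the 5-bit mask shifts left and its top
 * bit supplies the low bit of the condition.
 */
static inline void sketch_advance_it_state(uint32_t *cond, uint32_t *mask)
{
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0; /* IT block finished */
    }
}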

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
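
/*
 * Note the packed IT word above is the exact inverse of the unpacking
 * in arm_tr_init_disas_context():
 *   packed = (cond << 4) | (mask >> 1);
 *   cond   = packed >> 4;
 *   mask   = (packed & 0xf) << 1;
 */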

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page.  */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block.  */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
    dc->insn = insn;
    dc->pc += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns.  */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition.  */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->pc - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
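
/*
 * Worked example of the stop condition above, assuming 4K pages and
 * page_start = 0x8000: if the next insn starts at 0x9000 or beyond we
 * stop unconditionally; if it starts at 0x8ffe (the last halfword of
 * the page) we stop only when insn_crosses_page() reports that the
 * halfword there opens a 32-bit encoding, since a 16-bit insn would
 * still fit in this TB.
 */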

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Pass the byte length of this WFI insn (2 for a 16-bit Thumb
             * encoding, else 4) so a trapped WFI can adjust the return
             * address correctly.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_A32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb);
}

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}
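
/*
 * data[] above holds the per-insn words emitted by tcg_gen_insn_start()
 * during translation (see arm_tr_insn_start()): data[0] is the PC,
 * data[1] the packed IT state (meaningful for AArch32 only), and
 * data[2] the insn-start word that restores the syndrome high bits.
 */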