/* target/arm/translate.c — QEMU ARM translation (blame-viewer chrome removed) */
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
/* Feature-test convenience macros; each expects a DisasContext 's' in scope. */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Jump to the translator's illegal_op label if the required feature is absent. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
/* In user-mode emulation all guest code runs unprivileged; otherwise the
 * privilege level is tracked per-DisasContext.
 */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
60
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];          /* AArch32 core registers r0..r14 + pc */
/* Flag state, one TCG global per flag (shared with the AArch64 translator). */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 71
022c62cb 72#include "exec/gen-icount.h"
2e70f6ef 73
155c3eac
FN
/* Names of the 16 AArch32 core registers, indexed by register number.
 * Used only to label the TCG globals created in arm_translate_init().
 * Fully const-qualified: neither the pointers nor the strings are mutable.
 */
static const char * const regnames[16] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
77
b26eefb6
PB
78/* initialize TCG globals. */
79void arm_translate_init(void)
80{
155c3eac
FN
81 int i;
82
155c3eac 83 for (i = 0; i < 16; i++) {
e1ccc054 84 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 85 offsetof(CPUARMState, regs[i]),
155c3eac
FN
86 regnames[i]);
87 }
e1ccc054
RH
88 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
89 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
90 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
91 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 92
e1ccc054 93 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 94 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 95 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 96 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 97
14ade10f 98 a64_translate_init();
b26eefb6
PB
99}
100
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,       /* mask extracting the Rt register number */
    ISSInvalid = (1 << 5),   /* call site cannot supply valid ISS info */
    ISSIsAcqRel = (1 << 6),  /* access has acquire/release semantics */
    ISSIsWrite = (1 << 7),   /* access is a store */
    ISSIs16Bit = (1 << 8),   /* insn is a 16-bit Thumb encoding */
} ISSInfo;
112
/* Save the syndrome information for a Data Abort.
 * Decodes the memop and ISSInfo flags and records an ISS-valid data-abort
 * syndrome for the current insn, unless the call site flagged the info as
 * invalid or the register is R15 (no ISS for those).
 */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;      /* access size (log2 bytes) */
    bool sse = memop & MO_SIGN;     /* sign-extending load */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask; /* source/target register number */

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
143
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     * Each group below maps a set of privileged translation regimes to the
     * corresponding user-level regime.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        /* No unprivileged-access variant exists for these regimes. */
        g_assert_not_reached();
    }
}
177
39d5492a 178static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 179{
39d5492a 180 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
181 tcg_gen_ld_i32(tmp, cpu_env, offset);
182 return tmp;
183}
184
0ecb72a5 185#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 186
/* Store 'var' to the 32-bit CPUARMState field at 'offset' and free it
 * (the temporary is consumed).
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

/* Convenience wrapper: store 'var' to CPUARMState.name (consumes var). */
#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 195
b26eefb6 196/* Set a variable to the value of a CPU register. */
39d5492a 197static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
198{
199 if (reg == 15) {
200 uint32_t addr;
b90372ad 201 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
202 if (s->thumb)
203 addr = (long)s->pc + 2;
204 else
205 addr = (long)s->pc + 4;
206 tcg_gen_movi_i32(var, addr);
207 } else {
155c3eac 208 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
209 }
210}
211
212/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 213static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 214{
39d5492a 215 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
216 load_reg_var(s, tmp, reg);
217 return tmp;
218}
219
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* A write to the PC ends the current translation block. */
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
236
/* Value extensions.  Each operates on 'var' in place. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)    /* zero-extend byte */
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)   /* zero-extend halfword */
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)    /* sign-extend byte */
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)   /* sign-extend halfword */

/* Dual (per-halfword) extensions go via runtime helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 245
b26eefb6 246
39d5492a 247static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 248{
39d5492a 249 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 250 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
251 tcg_temp_free_i32(tmp_mask);
252}
d9ba4830
PB
253/* Set NZCV flags from the high 4 bits of var. */
254#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
255
d4a2dc67 256static void gen_exception_internal(int excp)
d9ba4830 257{
d4a2dc67
PM
258 TCGv_i32 tcg_excp = tcg_const_i32(excp);
259
260 assert(excp_is_internal(excp));
261 gen_helper_exception_internal(cpu_env, tcg_excp);
262 tcg_temp_free_i32(tcg_excp);
263}
264
73710361 265static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
266{
267 TCGv_i32 tcg_excp = tcg_const_i32(excp);
268 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 269 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 270
73710361
GB
271 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
272 tcg_syn, tcg_el);
273
274 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
275 tcg_temp_free_i32(tcg_syn);
276 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
277}
278
50225ad0
PM
279static void gen_ss_advance(DisasContext *s)
280{
281 /* If the singlestep state is Active-not-pending, advance to
282 * Active-pending.
283 */
284 if (s->ss_active) {
285 s->pstate_ss = 0;
286 gen_helper_clear_pstate_ss(cpu_env);
287 }
288}
289
/* End an architectural single step: advance the step state machine and
 * raise the software-step exception.
 */
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    /* No code after the exception is ever reached. */
    s->base.is_jmp = DISAS_NORETURN;
}
306
5425415e
PM
307static void gen_singlestep_exception(DisasContext *s)
308{
309 /* Generate the right kind of exception for singlestep, which is
310 * either the architectural singlestep or EXCP_DEBUG for QEMU's
311 * gdb singlestepping.
312 */
313 if (s->ss_active) {
314 gen_step_complete_exception(s);
315 } else {
316 gen_exception_internal(EXCP_DEBUG);
317 }
318}
319
b636649f
PM
320static inline bool is_singlestepping(DisasContext *s)
321{
322 /* Return true if we are singlestepping either because of
323 * architectural singlestep or QEMU gdbstub singlestep. This does
324 * not include the command line '-singlestep' mode which is rather
325 * misnamed as it only means "one instruction per TB" and doesn't
326 * affect the code we generate.
327 */
dcba3a8d 328 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
329}
330
39d5492a 331static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 332{
39d5492a
PM
333 TCGv_i32 tmp1 = tcg_temp_new_i32();
334 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
335 tcg_gen_ext16s_i32(tmp1, a);
336 tcg_gen_ext16s_i32(tmp2, b);
3670669c 337 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 338 tcg_temp_free_i32(tmp2);
3670669c
PB
339 tcg_gen_sari_i32(a, a, 16);
340 tcg_gen_sari_i32(b, b, 16);
341 tcg_gen_mul_i32(b, b, a);
342 tcg_gen_mov_i32(a, tmp1);
7d1b0095 343 tcg_temp_free_i32(tmp1);
3670669c
PB
344}
345
346/* Byteswap each halfword. */
39d5492a 347static void gen_rev16(TCGv_i32 var)
3670669c 348{
39d5492a 349 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 350 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 351 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
352 tcg_gen_and_i32(tmp, tmp, mask);
353 tcg_gen_and_i32(var, var, mask);
3670669c 354 tcg_gen_shli_i32(var, var, 8);
3670669c 355 tcg_gen_or_i32(var, var, tmp);
68cedf73 356 tcg_temp_free_i32(mask);
7d1b0095 357 tcg_temp_free_i32(tmp);
3670669c
PB
358}
359
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);   /* keep only the low halfword */
    tcg_gen_bswap16_i32(var, var);  /* swap its two bytes */
    tcg_gen_ext16s_i32(var, var);   /* sign-extend the result to 32 bits */
}
367
838fa72d 368/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 369static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 370{
838fa72d
AJ
371 TCGv_i64 tmp64 = tcg_temp_new_i64();
372
373 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 374 tcg_temp_free_i32(b);
838fa72d
AJ
375 tcg_gen_shli_i64(tmp64, tmp64, 32);
376 tcg_gen_add_i64(a, tmp64, a);
377
378 tcg_temp_free_i64(tmp64);
379 return a;
380}
381
382/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 383static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
384{
385 TCGv_i64 tmp64 = tcg_temp_new_i64();
386
387 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 388 tcg_temp_free_i32(b);
838fa72d
AJ
389 tcg_gen_shli_i64(tmp64, tmp64, 32);
390 tcg_gen_sub_i64(a, tmp64, a);
391
392 tcg_temp_free_i64(tmp64);
393 return a;
3670669c
PB
394}
395
5e3f878a 396/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 397static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 398{
39d5492a
PM
399 TCGv_i32 lo = tcg_temp_new_i32();
400 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 401 TCGv_i64 ret;
5e3f878a 402
831d7fe8 403 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 404 tcg_temp_free_i32(a);
7d1b0095 405 tcg_temp_free_i32(b);
831d7fe8
RH
406
407 ret = tcg_temp_new_i64();
408 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
409 tcg_temp_free_i32(lo);
410 tcg_temp_free_i32(hi);
831d7fe8
RH
411
412 return ret;
5e3f878a
PB
413}
414
39d5492a 415static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 416{
39d5492a
PM
417 TCGv_i32 lo = tcg_temp_new_i32();
418 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 419 TCGv_i64 ret;
5e3f878a 420
831d7fe8 421 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 422 tcg_temp_free_i32(a);
7d1b0095 423 tcg_temp_free_i32(b);
831d7fe8
RH
424
425 ret = tcg_temp_new_i64();
426 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
427 tcg_temp_free_i32(lo);
428 tcg_temp_free_i32(hi);
831d7fe8
RH
429
430 return ret;
5e3f878a
PB
431}
432
8f01245e 433/* Swap low and high halfwords. */
39d5492a 434static void gen_swap_half(TCGv_i32 var)
8f01245e 435{
39d5492a 436 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
437 tcg_gen_shri_i32(tmp, var, 16);
438 tcg_gen_shli_i32(var, var, 16);
439 tcg_gen_or_i32(var, var, tmp);
7d1b0095 440 tcg_temp_free_i32(tmp);
8f01245e
PB
441}
442
b26eefb6
PB
443/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
444 tmp = (t0 ^ t1) & 0x8000;
445 t0 &= ~0x8000;
446 t1 &= ~0x8000;
447 t0 = (t0 + t1) ^ tmp;
448 */
449
39d5492a 450static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 451{
39d5492a 452 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
453 tcg_gen_xor_i32(tmp, t0, t1);
454 tcg_gen_andi_i32(tmp, tmp, 0x8000);
455 tcg_gen_andi_i32(t0, t0, ~0x8000);
456 tcg_gen_andi_i32(t1, t1, ~0x8000);
457 tcg_gen_add_i32(t0, t0, t1);
458 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
459 tcg_temp_free_i32(tmp);
460 tcg_temp_free_i32(t1);
b26eefb6
PB
461}
462
/* Set CF to the top bit of var.  (NF/ZF/VF are untouched.) */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
468
/* Set N and Z flags from var.  (CF and VF are left unchanged, as for
 * ARM logical operations.)
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
475
/* T0 += T1 + CF.  Flags are not updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
482
/* dest = T0 + T1 + CF.  Flags are not updated. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
489
/* dest = T0 - T1 + CF - 1 (subtract with borrow).  Flags not updated. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
497
72485ec4 498/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 499static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 500{
39d5492a 501 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
502 tcg_gen_movi_i32(tmp, 0);
503 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 504 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 505 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
506 tcg_gen_xor_i32(tmp, t0, t1);
507 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
508 tcg_temp_free_i32(tmp);
509 tcg_gen_mov_i32(dest, cpu_NF);
510}
511
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained 33-bit adds: first t0 + CF, then + t1, with the
         * carry-out accumulated in cpu_CF at each step.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the sum in 64 bits and split into NF (low half)
         * and CF (high half).
         */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V iff the operands have equal signs and the result sign differs. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
539
72485ec4 540/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 541static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 542{
39d5492a 543 TCGv_i32 tmp;
72485ec4
AJ
544 tcg_gen_sub_i32(cpu_NF, t0, t1);
545 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
546 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
547 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
548 tmp = tcg_temp_new_i32();
549 tcg_gen_xor_i32(tmp, t0, t1);
550 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
551 tcg_temp_free_i32(tmp);
552 tcg_gen_mov_i32(dest, cpu_NF);
553}
554
e77f0832 555/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 556static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 557{
39d5492a 558 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
559 tcg_gen_not_i32(tmp, t1);
560 gen_adc_CC(dest, t0, tmp);
39d5492a 561 tcg_temp_free_i32(tmp);
2de68a49
RH
562}
563
/* Variable shift by register.  ARM semantics: only the low byte of the
 * count is used, and counts >= 32 produce a zero result — implemented
 * with a movcond selecting 0 when (t1 & 0xff) > 0x1f.
 */
#define GEN_SHIFT(name)                                       \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                             \
    TCGv_i32 tmp1, tmp2, tmp3;                                \
    tmp1 = tcg_temp_new_i32();                                \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                         \
    tmp2 = tcg_const_i32(0);                                  \
    tmp3 = tcg_const_i32(0x1f);                               \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                  \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                       \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                   \
    tcg_temp_free_i32(tmp2);                                  \
    tcg_temp_free_i32(tmp1);                                  \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
582
39d5492a 583static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 584{
39d5492a 585 TCGv_i32 tmp1, tmp2;
365af80e
AJ
586 tmp1 = tcg_temp_new_i32();
587 tcg_gen_andi_i32(tmp1, t1, 0xff);
588 tmp2 = tcg_const_i32(0x1f);
589 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
590 tcg_temp_free_i32(tmp2);
591 tcg_gen_sar_i32(dest, t0, tmp1);
592 tcg_temp_free_i32(tmp1);
593}
594
39d5492a 595static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 596{
39d5492a
PM
597 TCGv_i32 c0 = tcg_const_i32(0);
598 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
599 tcg_gen_neg_i32(tmp, src);
600 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
601 tcg_temp_free_i32(c0);
602 tcg_temp_free_i32(tmp);
603}
ad69471c 604
/* Compute the shifter carry-out into CF: bit 'shift' of var
 * (bit 0 when shift == 0).
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* shift == 31 needs no masking: only bit 0 can be set. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 616
9a119ff6 617/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
618static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
619 int shift, int flags)
9a119ff6
PB
620{
621 switch (shiftop) {
622 case 0: /* LSL */
623 if (shift != 0) {
624 if (flags)
625 shifter_out_im(var, 32 - shift);
626 tcg_gen_shli_i32(var, var, shift);
627 }
628 break;
629 case 1: /* LSR */
630 if (shift == 0) {
631 if (flags) {
66c374de 632 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
633 }
634 tcg_gen_movi_i32(var, 0);
635 } else {
636 if (flags)
637 shifter_out_im(var, shift - 1);
638 tcg_gen_shri_i32(var, var, shift);
639 }
640 break;
641 case 2: /* ASR */
642 if (shift == 0)
643 shift = 32;
644 if (flags)
645 shifter_out_im(var, shift - 1);
646 if (shift == 32)
647 shift = 31;
648 tcg_gen_sari_i32(var, var, shift);
649 break;
650 case 3: /* ROR/RRX */
651 if (shift != 0) {
652 if (flags)
653 shifter_out_im(var, shift - 1);
f669df27 654 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 655 } else {
39d5492a 656 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 657 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
658 if (flags)
659 shifter_out_im(var, 0);
660 tcg_gen_shri_i32(var, var, 1);
b26eefb6 661 tcg_gen_or_i32(var, var, tmp);
7d1b0095 662 tcg_temp_free_i32(tmp);
b26eefb6
PB
663 }
664 }
665};
666
39d5492a
PM
667static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
668 TCGv_i32 shift, int flags)
8984bd2e
PB
669{
670 if (flags) {
671 switch (shiftop) {
9ef39277
BS
672 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
673 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
674 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
675 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
676 }
677 } else {
678 switch (shiftop) {
365af80e
AJ
679 case 0:
680 gen_shl(var, var, shift);
681 break;
682 case 1:
683 gen_shr(var, var, shift);
684 break;
685 case 2:
686 gen_sar(var, var, shift);
687 break;
f669df27
AJ
688 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
689 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
690 }
691 }
7d1b0095 692 tcg_temp_free_i32(shift);
8984bd2e
PB
693}
694
6ddbc6e4
PB
/* Dispatch one ARM parallel add/subtract variant: op2 selects the
 * operation, pfx selects the signed/unsigned/saturating/halving class.
 */
#define PAS_OP(pfx) \
    switch (op2) { \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
/* The plain signed/unsigned helpers also take a pointer to the GE flags. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
/* Saturating and halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
740
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Here op1 selects the operation and op2 selects the class — the reverse
 * of the ARM-encoding function above.
 */
#define PAS_OP(pfx) \
    switch (op1) { \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
/* The plain signed/unsigned helpers also take a pointer to the GE flags. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
/* Saturating and halving variants do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
787
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Fills in cmp with a TCG condition and a value to compare against zero;
 * cmp->value_global records whether the value is a TCG global (which the
 * caller must not free) or a temporary (freed by arm_free_cc()).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the inverses of the preceding even ones. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
877
878void arm_free_cc(DisasCompare *cmp)
879{
880 if (!cmp->value_global) {
881 tcg_temp_free_i32(cmp->value);
882 }
883}
884
/* Branch to 'label' if the prepared condition holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
889
/* Convenience wrapper: branch to 'label' if condition code 'cc' holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 897
/* Indexed by data-processing opcode: 1 for the logical ops annotated
 * below, 0 for the arithmetic ones.  Presumably used to choose between
 * logic-style and arithmetic-style flag setting — confirm at use site.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 916
4d5e8c96
PM
917static inline void gen_set_condexec(DisasContext *s)
918{
919 if (s->condexec_mask) {
920 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
921 TCGv_i32 tmp = tcg_temp_new_i32();
922 tcg_gen_movi_i32(tmp, val);
923 store_cpu_field(tmp, condexec_bits);
924 }
925}
926
/* Set the PC (r15) to a known immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
931
d9ba4830
PB
932/* Set PC and Thumb state from an immediate address. */
933static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 934{
39d5492a 935 TCGv_i32 tmp;
99c475ab 936
dcba3a8d 937 s->base.is_jmp = DISAS_JUMP;
d9ba4830 938 if (s->thumb != (addr & 1)) {
7d1b0095 939 tmp = tcg_temp_new_i32();
d9ba4830 940 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 941 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 942 tcg_temp_free_i32(tmp);
d9ba4830 943 }
155c3eac 944 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
945}
946
/* Set PC and Thumb state from var.  var is marked as dead.
 * Bit 0 of var becomes the new Thumb state; the stored PC has it cleared.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
955
3bb8a96f
PM
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    /* Security extensions need the check in all modes; otherwise only
     * Handler mode on an M-profile core can take the exception-return path.
     */
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
972
973static inline void gen_bx_excret_final_code(DisasContext *s)
974{
975 /* Generate the code to finish possible exception return and end the TB */
976 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
977 uint32_t min_magic;
978
979 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
980 /* Covers FNC_RETURN and EXC_RETURN magic */
981 min_magic = FNC_RETURN_MIN_MAGIC;
982 } else {
983 /* EXC_RETURN magic only */
984 min_magic = EXC_RETURN_MIN_MAGIC;
985 }
3bb8a96f
PM
986
987 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 988 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
989 /* No: end the TB as we would for a DISAS_JMP */
990 if (is_singlestepping(s)) {
991 gen_singlestep_exception(s);
992 } else {
993 tcg_gen_exit_tb(0);
994 }
995 gen_set_label(excret_label);
996 /* Yes: this is an exception return.
997 * At this point in runtime env->regs[15] and env->thumb will hold
998 * the exception-return magic number, which do_v7m_exception_exit()
999 * will read. Nothing else will be able to see those values because
1000 * the cpu-exec main loop guarantees that we will always go straight
1001 * from raising the exception to the exception-handling code.
1002 *
1003 * gen_ss_advance(s) does nothing on M profile currently but
1004 * calling it is conceptually the right thing as we have executed
1005 * this instruction (compare SWI, HVC, SMC handling).
1006 */
1007 gen_ss_advance(s);
1008 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1009}
1010
fb602cb7
PM
1011static inline void gen_bxns(DisasContext *s, int rm)
1012{
1013 TCGv_i32 var = load_reg(s, rm);
1014
1015 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1016 * we need to sync state before calling it, but:
1017 * - we don't need to do gen_set_pc_im() because the bxns helper will
1018 * always set the PC itself
1019 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1020 * unless it's outside an IT block or the last insn in an IT block,
1021 * so we know that condexec == 0 (already set at the top of the TB)
1022 * is correct in the non-UNPREDICTABLE cases, and we can choose
1023 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1024 */
1025 gen_helper_v7m_bxns(cpu_env, var);
1026 tcg_temp_free_i32(var);
ef475b5d 1027 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1028}
1029
3e3fa230
PM
1030static inline void gen_blxns(DisasContext *s, int rm)
1031{
1032 TCGv_i32 var = load_reg(s, rm);
1033
1034 /* We don't need to sync condexec state, for the same reason as bxns.
1035 * We do however need to set the PC, because the blxns helper reads it.
1036 * The blxns helper may throw an exception.
1037 */
1038 gen_set_pc_im(s, s->pc);
1039 gen_helper_v7m_blxns(cpu_env, var);
1040 tcg_temp_free_i32(var);
1041 s->base.is_jmp = DISAS_EXIT;
1042}
1043
21aeb343
JR
1044/* Variant of store_reg which uses branch&exchange logic when storing
1045 to r15 in ARM architecture v7 and above. The source must be a temporary
1046 and will be marked as dead. */
7dcc1f89 1047static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1048{
1049 if (reg == 15 && ENABLE_ARCH_7) {
1050 gen_bx(s, var);
1051 } else {
1052 store_reg(s, reg, var);
1053 }
1054}
1055
be5e7a76
DES
1056/* Variant of store_reg which uses branch&exchange logic when storing
1057 * to r15 in ARM architecture v5T and above. This is used for storing
1058 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1059 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1060static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1061{
1062 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1063 gen_bx_excret(s, var);
be5e7a76
DES
1064 } else {
1065 store_reg(s, reg, var);
1066 }
1067}
1068
e334bd31
PB
1069#ifdef CONFIG_USER_ONLY
1070#define IS_USER_ONLY 1
1071#else
1072#define IS_USER_ONLY 0
1073#endif
1074
08307563
PM
1075/* Abstractions of "generate code to do a guest load/store for
1076 * AArch32", where a vaddr is always 32 bits (and is zero
1077 * extended if we're a 64 bit core) and data is also
1078 * 32 bits unless specifically doing a 64 bit access.
1079 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1080 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1081 */
08307563 1082
7f5616f5 1083static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1084{
7f5616f5
RH
1085 TCGv addr = tcg_temp_new();
1086 tcg_gen_extu_i32_tl(addr, a32);
1087
e334bd31 1088 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1089 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1090 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1091 }
7f5616f5 1092 return addr;
08307563
PM
1093}
1094
7f5616f5
RH
1095static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1096 int index, TCGMemOp opc)
08307563 1097{
7f5616f5
RH
1098 TCGv addr = gen_aa32_addr(s, a32, opc);
1099 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1100 tcg_temp_free(addr);
08307563
PM
1101}
1102
7f5616f5
RH
1103static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1104 int index, TCGMemOp opc)
1105{
1106 TCGv addr = gen_aa32_addr(s, a32, opc);
1107 tcg_gen_qemu_st_i32(val, addr, index, opc);
1108 tcg_temp_free(addr);
1109}
08307563 1110
7f5616f5 1111#define DO_GEN_LD(SUFF, OPC) \
12dcc321 1112static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1113 TCGv_i32 a32, int index) \
08307563 1114{ \
7f5616f5 1115 gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1116} \
1117static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s, \
1118 TCGv_i32 val, \
1119 TCGv_i32 a32, int index, \
1120 ISSInfo issinfo) \
1121{ \
1122 gen_aa32_ld##SUFF(s, val, a32, index); \
1123 disas_set_da_iss(s, OPC, issinfo); \
08307563
PM
1124}
1125
7f5616f5 1126#define DO_GEN_ST(SUFF, OPC) \
12dcc321 1127static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
7f5616f5 1128 TCGv_i32 a32, int index) \
08307563 1129{ \
7f5616f5 1130 gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
9bb6558a
PM
1131} \
1132static inline void gen_aa32_st##SUFF##_iss(DisasContext *s, \
1133 TCGv_i32 val, \
1134 TCGv_i32 a32, int index, \
1135 ISSInfo issinfo) \
1136{ \
1137 gen_aa32_st##SUFF(s, val, a32, index); \
1138 disas_set_da_iss(s, OPC, issinfo | ISSIsWrite); \
08307563
PM
1139}
1140
7f5616f5 1141static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1142{
e334bd31
PB
1143 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1144 if (!IS_USER_ONLY && s->sctlr_b) {
1145 tcg_gen_rotri_i64(val, val, 32);
1146 }
08307563
PM
1147}
1148
7f5616f5
RH
1149static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1150 int index, TCGMemOp opc)
08307563 1151{
7f5616f5
RH
1152 TCGv addr = gen_aa32_addr(s, a32, opc);
1153 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1154 gen_aa32_frob64(s, val);
1155 tcg_temp_free(addr);
1156}
1157
1158static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1159 TCGv_i32 a32, int index)
1160{
1161 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1162}
1163
1164static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1165 int index, TCGMemOp opc)
1166{
1167 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1168
1169 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1170 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1171 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1172 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1173 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1174 tcg_temp_free_i64(tmp);
e334bd31 1175 } else {
7f5616f5 1176 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1177 }
7f5616f5 1178 tcg_temp_free(addr);
08307563
PM
1179}
1180
7f5616f5
RH
1181static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1182 TCGv_i32 a32, int index)
1183{
1184 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1185}
08307563 1186
7f5616f5
RH
1187DO_GEN_LD(8s, MO_SB)
1188DO_GEN_LD(8u, MO_UB)
1189DO_GEN_LD(16s, MO_SW)
1190DO_GEN_LD(16u, MO_UW)
1191DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1192DO_GEN_ST(8, MO_UB)
1193DO_GEN_ST(16, MO_UW)
1194DO_GEN_ST(32, MO_UL)
08307563 1195
37e6456e
PM
1196static inline void gen_hvc(DisasContext *s, int imm16)
1197{
1198 /* The pre HVC helper handles cases when HVC gets trapped
1199 * as an undefined insn by runtime configuration (ie before
1200 * the insn really executes).
1201 */
1202 gen_set_pc_im(s, s->pc - 4);
1203 gen_helper_pre_hvc(cpu_env);
1204 /* Otherwise we will treat this as a real exception which
1205 * happens after execution of the insn. (The distinction matters
1206 * for the PC value reported to the exception handler and also
1207 * for single stepping.)
1208 */
1209 s->svc_imm = imm16;
1210 gen_set_pc_im(s, s->pc);
dcba3a8d 1211 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1212}
1213
1214static inline void gen_smc(DisasContext *s)
1215{
1216 /* As with HVC, we may take an exception either before or after
1217 * the insn executes.
1218 */
1219 TCGv_i32 tmp;
1220
1221 gen_set_pc_im(s, s->pc - 4);
1222 tmp = tcg_const_i32(syn_aa32_smc());
1223 gen_helper_pre_smc(cpu_env, tmp);
1224 tcg_temp_free_i32(tmp);
1225 gen_set_pc_im(s, s->pc);
dcba3a8d 1226 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1227}
1228
d4a2dc67
PM
1229static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1230{
1231 gen_set_condexec(s);
1232 gen_set_pc_im(s, s->pc - offset);
1233 gen_exception_internal(excp);
dcba3a8d 1234 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1235}
1236
73710361
GB
1237static void gen_exception_insn(DisasContext *s, int offset, int excp,
1238 int syn, uint32_t target_el)
d4a2dc67
PM
1239{
1240 gen_set_condexec(s);
1241 gen_set_pc_im(s, s->pc - offset);
73710361 1242 gen_exception(excp, syn, target_el);
dcba3a8d 1243 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1244}
1245
b5ff1b31
FB
1246/* Force a TB lookup after an instruction that changes the CPU state. */
1247static inline void gen_lookup_tb(DisasContext *s)
1248{
a6445c52 1249 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1250 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1251}
1252
19a6e31c
PM
1253static inline void gen_hlt(DisasContext *s, int imm)
1254{
1255 /* HLT. This has two purposes.
1256 * Architecturally, it is an external halting debug instruction.
1257 * Since QEMU doesn't implement external debug, we treat this as
1258 * it is required for halting debug disabled: it will UNDEF.
1259 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1260 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1261 * must trigger semihosting even for ARMv7 and earlier, where
1262 * HLT was an undefined encoding.
1263 * In system mode, we don't allow userspace access to
1264 * semihosting, to provide some semblance of security
1265 * (and for consistency with our 32-bit semihosting).
1266 */
1267 if (semihosting_enabled() &&
1268#ifndef CONFIG_USER_ONLY
1269 s->current_el != 0 &&
1270#endif
1271 (imm == (s->thumb ? 0x3c : 0xf000))) {
1272 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1273 return;
1274 }
1275
1276 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1277 default_exception_el(s));
1278}
1279
b0109805 1280static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1281 TCGv_i32 var)
2c0262af 1282{
1e8d4eec 1283 int val, rm, shift, shiftop;
39d5492a 1284 TCGv_i32 offset;
2c0262af
FB
1285
1286 if (!(insn & (1 << 25))) {
1287 /* immediate */
1288 val = insn & 0xfff;
1289 if (!(insn & (1 << 23)))
1290 val = -val;
537730b9 1291 if (val != 0)
b0109805 1292 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1293 } else {
1294 /* shift/register */
1295 rm = (insn) & 0xf;
1296 shift = (insn >> 7) & 0x1f;
1e8d4eec 1297 shiftop = (insn >> 5) & 3;
b26eefb6 1298 offset = load_reg(s, rm);
9a119ff6 1299 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1300 if (!(insn & (1 << 23)))
b0109805 1301 tcg_gen_sub_i32(var, var, offset);
2c0262af 1302 else
b0109805 1303 tcg_gen_add_i32(var, var, offset);
7d1b0095 1304 tcg_temp_free_i32(offset);
2c0262af
FB
1305 }
1306}
1307
191f9a93 1308static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1309 int extra, TCGv_i32 var)
2c0262af
FB
1310{
1311 int val, rm;
39d5492a 1312 TCGv_i32 offset;
3b46e624 1313
2c0262af
FB
1314 if (insn & (1 << 22)) {
1315 /* immediate */
1316 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1317 if (!(insn & (1 << 23)))
1318 val = -val;
18acad92 1319 val += extra;
537730b9 1320 if (val != 0)
b0109805 1321 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1322 } else {
1323 /* register */
191f9a93 1324 if (extra)
b0109805 1325 tcg_gen_addi_i32(var, var, extra);
2c0262af 1326 rm = (insn) & 0xf;
b26eefb6 1327 offset = load_reg(s, rm);
2c0262af 1328 if (!(insn & (1 << 23)))
b0109805 1329 tcg_gen_sub_i32(var, var, offset);
2c0262af 1330 else
b0109805 1331 tcg_gen_add_i32(var, var, offset);
7d1b0095 1332 tcg_temp_free_i32(offset);
2c0262af
FB
1333 }
1334}
1335
5aaebd13
PM
1336static TCGv_ptr get_fpstatus_ptr(int neon)
1337{
1338 TCGv_ptr statusptr = tcg_temp_new_ptr();
1339 int offset;
1340 if (neon) {
0ecb72a5 1341 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1342 } else {
0ecb72a5 1343 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1344 }
1345 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1346 return statusptr;
1347}
1348
4373f3ce
PB
1349#define VFP_OP2(name) \
1350static inline void gen_vfp_##name(int dp) \
1351{ \
ae1857ec
PM
1352 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1353 if (dp) { \
1354 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1355 } else { \
1356 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1357 } \
1358 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1359}
1360
4373f3ce
PB
1361VFP_OP2(add)
1362VFP_OP2(sub)
1363VFP_OP2(mul)
1364VFP_OP2(div)
1365
1366#undef VFP_OP2
1367
605a6aed
PM
1368static inline void gen_vfp_F1_mul(int dp)
1369{
1370 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1371 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1372 if (dp) {
ae1857ec 1373 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1374 } else {
ae1857ec 1375 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1376 }
ae1857ec 1377 tcg_temp_free_ptr(fpst);
605a6aed
PM
1378}
1379
1380static inline void gen_vfp_F1_neg(int dp)
1381{
1382 /* Like gen_vfp_neg() but put result in F1 */
1383 if (dp) {
1384 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1385 } else {
1386 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1387 }
1388}
1389
4373f3ce
PB
1390static inline void gen_vfp_abs(int dp)
1391{
1392 if (dp)
1393 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1394 else
1395 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1396}
1397
1398static inline void gen_vfp_neg(int dp)
1399{
1400 if (dp)
1401 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1402 else
1403 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1404}
1405
1406static inline void gen_vfp_sqrt(int dp)
1407{
1408 if (dp)
1409 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1410 else
1411 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1412}
1413
1414static inline void gen_vfp_cmp(int dp)
1415{
1416 if (dp)
1417 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1418 else
1419 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1420}
1421
1422static inline void gen_vfp_cmpe(int dp)
1423{
1424 if (dp)
1425 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1426 else
1427 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1428}
1429
1430static inline void gen_vfp_F1_ld0(int dp)
1431{
1432 if (dp)
5b340b51 1433 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1434 else
5b340b51 1435 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1436}
1437
5500b06c
PM
1438#define VFP_GEN_ITOF(name) \
1439static inline void gen_vfp_##name(int dp, int neon) \
1440{ \
5aaebd13 1441 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1442 if (dp) { \
1443 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1444 } else { \
1445 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1446 } \
b7fa9214 1447 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1448}
1449
5500b06c
PM
1450VFP_GEN_ITOF(uito)
1451VFP_GEN_ITOF(sito)
1452#undef VFP_GEN_ITOF
4373f3ce 1453
5500b06c
PM
1454#define VFP_GEN_FTOI(name) \
1455static inline void gen_vfp_##name(int dp, int neon) \
1456{ \
5aaebd13 1457 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1458 if (dp) { \
1459 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1460 } else { \
1461 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1462 } \
b7fa9214 1463 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1464}
1465
5500b06c
PM
1466VFP_GEN_FTOI(toui)
1467VFP_GEN_FTOI(touiz)
1468VFP_GEN_FTOI(tosi)
1469VFP_GEN_FTOI(tosiz)
1470#undef VFP_GEN_FTOI
4373f3ce 1471
16d5b3ca 1472#define VFP_GEN_FIX(name, round) \
5500b06c 1473static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1474{ \
39d5492a 1475 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1476 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1477 if (dp) { \
16d5b3ca
WN
1478 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1479 statusptr); \
5500b06c 1480 } else { \
16d5b3ca
WN
1481 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1482 statusptr); \
5500b06c 1483 } \
b75263d6 1484 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1485 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1486}
16d5b3ca
WN
1487VFP_GEN_FIX(tosh, _round_to_zero)
1488VFP_GEN_FIX(tosl, _round_to_zero)
1489VFP_GEN_FIX(touh, _round_to_zero)
1490VFP_GEN_FIX(toul, _round_to_zero)
1491VFP_GEN_FIX(shto, )
1492VFP_GEN_FIX(slto, )
1493VFP_GEN_FIX(uhto, )
1494VFP_GEN_FIX(ulto, )
4373f3ce 1495#undef VFP_GEN_FIX
9ee6e8bb 1496
39d5492a 1497static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1498{
08307563 1499 if (dp) {
12dcc321 1500 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1501 } else {
12dcc321 1502 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1503 }
b5ff1b31
FB
1504}
1505
39d5492a 1506static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1507{
08307563 1508 if (dp) {
12dcc321 1509 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1510 } else {
12dcc321 1511 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1512 }
b5ff1b31
FB
1513}
1514
c39c2b90 1515static inline long vfp_reg_offset(bool dp, unsigned reg)
8e96005d 1516{
9a2b5256 1517 if (dp) {
c39c2b90 1518 return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
8e96005d 1519 } else {
c39c2b90 1520 long ofs = offsetof(CPUARMState, vfp.zregs[reg >> 2].d[(reg >> 1) & 1]);
9a2b5256
RH
1521 if (reg & 1) {
1522 ofs += offsetof(CPU_DoubleU, l.upper);
1523 } else {
1524 ofs += offsetof(CPU_DoubleU, l.lower);
1525 }
1526 return ofs;
8e96005d
FB
1527 }
1528}
9ee6e8bb
PB
1529
1530/* Return the offset of a 32-bit piece of a NEON register.
1531 zero is the least significant end of the register. */
1532static inline long
1533neon_reg_offset (int reg, int n)
1534{
1535 int sreg;
1536 sreg = reg * 2 + n;
1537 return vfp_reg_offset(0, sreg);
1538}
1539
39d5492a 1540static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1541{
39d5492a 1542 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1543 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1544 return tmp;
1545}
1546
39d5492a 1547static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1548{
1549 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1550 tcg_temp_free_i32(var);
8f8e3aa4
PB
1551}
1552
a7812ae4 1553static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1554{
1555 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1556}
1557
a7812ae4 1558static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1559{
1560 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1561}
1562
1a66ac61
RH
1563static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
1564{
1565 TCGv_ptr ret = tcg_temp_new_ptr();
1566 tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
1567 return ret;
1568}
1569
4373f3ce
PB
1570#define tcg_gen_ld_f32 tcg_gen_ld_i32
1571#define tcg_gen_ld_f64 tcg_gen_ld_i64
1572#define tcg_gen_st_f32 tcg_gen_st_i32
1573#define tcg_gen_st_f64 tcg_gen_st_i64
1574
b7bcbe95
FB
1575static inline void gen_mov_F0_vreg(int dp, int reg)
1576{
1577 if (dp)
4373f3ce 1578 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1579 else
4373f3ce 1580 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1581}
1582
1583static inline void gen_mov_F1_vreg(int dp, int reg)
1584{
1585 if (dp)
4373f3ce 1586 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1587 else
4373f3ce 1588 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1589}
1590
1591static inline void gen_mov_vreg_F0(int dp, int reg)
1592{
1593 if (dp)
4373f3ce 1594 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1595 else
4373f3ce 1596 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1597}
1598
18c9b560
AZ
1599#define ARM_CP_RW_BIT (1 << 20)
1600
a7812ae4 1601static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1602{
0ecb72a5 1603 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1604}
1605
a7812ae4 1606static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1607{
0ecb72a5 1608 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1609}
1610
39d5492a 1611static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1612{
39d5492a 1613 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1614 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1615 return var;
e677137d
PB
1616}
1617
39d5492a 1618static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1619{
0ecb72a5 1620 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1621 tcg_temp_free_i32(var);
e677137d
PB
1622}
1623
1624static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1625{
1626 iwmmxt_store_reg(cpu_M0, rn);
1627}
1628
1629static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1630{
1631 iwmmxt_load_reg(cpu_M0, rn);
1632}
1633
1634static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1635{
1636 iwmmxt_load_reg(cpu_V1, rn);
1637 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1638}
1639
1640static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1641{
1642 iwmmxt_load_reg(cpu_V1, rn);
1643 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1644}
1645
1646static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1647{
1648 iwmmxt_load_reg(cpu_V1, rn);
1649 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1650}
1651
1652#define IWMMXT_OP(name) \
1653static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1654{ \
1655 iwmmxt_load_reg(cpu_V1, rn); \
1656 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1657}
1658
477955bd
PM
1659#define IWMMXT_OP_ENV(name) \
1660static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1661{ \
1662 iwmmxt_load_reg(cpu_V1, rn); \
1663 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1664}
1665
1666#define IWMMXT_OP_ENV_SIZE(name) \
1667IWMMXT_OP_ENV(name##b) \
1668IWMMXT_OP_ENV(name##w) \
1669IWMMXT_OP_ENV(name##l)
e677137d 1670
477955bd 1671#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1672static inline void gen_op_iwmmxt_##name##_M0(void) \
1673{ \
477955bd 1674 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1675}
1676
1677IWMMXT_OP(maddsq)
1678IWMMXT_OP(madduq)
1679IWMMXT_OP(sadb)
1680IWMMXT_OP(sadw)
1681IWMMXT_OP(mulslw)
1682IWMMXT_OP(mulshw)
1683IWMMXT_OP(mululw)
1684IWMMXT_OP(muluhw)
1685IWMMXT_OP(macsw)
1686IWMMXT_OP(macuw)
1687
477955bd
PM
1688IWMMXT_OP_ENV_SIZE(unpackl)
1689IWMMXT_OP_ENV_SIZE(unpackh)
1690
1691IWMMXT_OP_ENV1(unpacklub)
1692IWMMXT_OP_ENV1(unpackluw)
1693IWMMXT_OP_ENV1(unpacklul)
1694IWMMXT_OP_ENV1(unpackhub)
1695IWMMXT_OP_ENV1(unpackhuw)
1696IWMMXT_OP_ENV1(unpackhul)
1697IWMMXT_OP_ENV1(unpacklsb)
1698IWMMXT_OP_ENV1(unpacklsw)
1699IWMMXT_OP_ENV1(unpacklsl)
1700IWMMXT_OP_ENV1(unpackhsb)
1701IWMMXT_OP_ENV1(unpackhsw)
1702IWMMXT_OP_ENV1(unpackhsl)
1703
1704IWMMXT_OP_ENV_SIZE(cmpeq)
1705IWMMXT_OP_ENV_SIZE(cmpgtu)
1706IWMMXT_OP_ENV_SIZE(cmpgts)
1707
1708IWMMXT_OP_ENV_SIZE(mins)
1709IWMMXT_OP_ENV_SIZE(minu)
1710IWMMXT_OP_ENV_SIZE(maxs)
1711IWMMXT_OP_ENV_SIZE(maxu)
1712
1713IWMMXT_OP_ENV_SIZE(subn)
1714IWMMXT_OP_ENV_SIZE(addn)
1715IWMMXT_OP_ENV_SIZE(subu)
1716IWMMXT_OP_ENV_SIZE(addu)
1717IWMMXT_OP_ENV_SIZE(subs)
1718IWMMXT_OP_ENV_SIZE(adds)
1719
1720IWMMXT_OP_ENV(avgb0)
1721IWMMXT_OP_ENV(avgb1)
1722IWMMXT_OP_ENV(avgw0)
1723IWMMXT_OP_ENV(avgw1)
e677137d 1724
477955bd
PM
1725IWMMXT_OP_ENV(packuw)
1726IWMMXT_OP_ENV(packul)
1727IWMMXT_OP_ENV(packuq)
1728IWMMXT_OP_ENV(packsw)
1729IWMMXT_OP_ENV(packsl)
1730IWMMXT_OP_ENV(packsq)
e677137d 1731
e677137d
PB
1732static void gen_op_iwmmxt_set_mup(void)
1733{
39d5492a 1734 TCGv_i32 tmp;
e677137d
PB
1735 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1736 tcg_gen_ori_i32(tmp, tmp, 2);
1737 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1738}
1739
1740static void gen_op_iwmmxt_set_cup(void)
1741{
39d5492a 1742 TCGv_i32 tmp;
e677137d
PB
1743 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1744 tcg_gen_ori_i32(tmp, tmp, 1);
1745 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1746}
1747
1748static void gen_op_iwmmxt_setpsr_nz(void)
1749{
39d5492a 1750 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1751 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1752 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1753}
1754
1755static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1756{
1757 iwmmxt_load_reg(cpu_V1, rn);
86831435 1758 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1759 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1760}
1761
39d5492a
PM
1762static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1763 TCGv_i32 dest)
18c9b560
AZ
1764{
1765 int rd;
1766 uint32_t offset;
39d5492a 1767 TCGv_i32 tmp;
18c9b560
AZ
1768
1769 rd = (insn >> 16) & 0xf;
da6b5335 1770 tmp = load_reg(s, rd);
18c9b560
AZ
1771
1772 offset = (insn & 0xff) << ((insn >> 7) & 2);
1773 if (insn & (1 << 24)) {
1774 /* Pre indexed */
1775 if (insn & (1 << 23))
da6b5335 1776 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1777 else
da6b5335
FN
1778 tcg_gen_addi_i32(tmp, tmp, -offset);
1779 tcg_gen_mov_i32(dest, tmp);
18c9b560 1780 if (insn & (1 << 21))
da6b5335
FN
1781 store_reg(s, rd, tmp);
1782 else
7d1b0095 1783 tcg_temp_free_i32(tmp);
18c9b560
AZ
1784 } else if (insn & (1 << 21)) {
1785 /* Post indexed */
da6b5335 1786 tcg_gen_mov_i32(dest, tmp);
18c9b560 1787 if (insn & (1 << 23))
da6b5335 1788 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1789 else
da6b5335
FN
1790 tcg_gen_addi_i32(tmp, tmp, -offset);
1791 store_reg(s, rd, tmp);
18c9b560
AZ
1792 } else if (!(insn & (1 << 23)))
1793 return 1;
1794 return 0;
1795}
1796
39d5492a 1797static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1798{
1799 int rd = (insn >> 0) & 0xf;
39d5492a 1800 TCGv_i32 tmp;
18c9b560 1801
da6b5335
FN
1802 if (insn & (1 << 8)) {
1803 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1804 return 1;
da6b5335
FN
1805 } else {
1806 tmp = iwmmxt_load_creg(rd);
1807 }
1808 } else {
7d1b0095 1809 tmp = tcg_temp_new_i32();
da6b5335 1810 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1811 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1812 }
1813 tcg_gen_andi_i32(tmp, tmp, mask);
1814 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1815 tcg_temp_free_i32(tmp);
18c9b560
AZ
1816 return 0;
1817}
1818
a1c7273b 1819/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1820 (ie. an undefined instruction). */
7dcc1f89 1821static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1822{
1823 int rd, wrd;
1824 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1825 TCGv_i32 addr;
1826 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1827
1828 if ((insn & 0x0e000e00) == 0x0c000000) {
1829 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1830 wrd = insn & 0xf;
1831 rdlo = (insn >> 12) & 0xf;
1832 rdhi = (insn >> 16) & 0xf;
1833 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1834 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1835 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1836 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1837 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1838 } else { /* TMCRR */
da6b5335
FN
1839 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1840 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1841 gen_op_iwmmxt_set_mup();
1842 }
1843 return 0;
1844 }
1845
1846 wrd = (insn >> 12) & 0xf;
7d1b0095 1847 addr = tcg_temp_new_i32();
da6b5335 1848 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1849 tcg_temp_free_i32(addr);
18c9b560 1850 return 1;
da6b5335 1851 }
18c9b560
AZ
1852 if (insn & ARM_CP_RW_BIT) {
1853 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1854 tmp = tcg_temp_new_i32();
12dcc321 1855 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1856 iwmmxt_store_creg(wrd, tmp);
18c9b560 1857 } else {
e677137d
PB
1858 i = 1;
1859 if (insn & (1 << 8)) {
1860 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1861 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1862 i = 0;
1863 } else { /* WLDRW wRd */
29531141 1864 tmp = tcg_temp_new_i32();
12dcc321 1865 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1866 }
1867 } else {
29531141 1868 tmp = tcg_temp_new_i32();
e677137d 1869 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1870 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1871 } else { /* WLDRB */
12dcc321 1872 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1873 }
1874 }
1875 if (i) {
1876 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1877 tcg_temp_free_i32(tmp);
e677137d 1878 }
18c9b560
AZ
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 }
1881 } else {
1882 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1883 tmp = iwmmxt_load_creg(wrd);
12dcc321 1884 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1885 } else {
1886 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1887 tmp = tcg_temp_new_i32();
e677137d
PB
1888 if (insn & (1 << 8)) {
1889 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1890 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1891 } else { /* WSTRW wRd */
ecc7b3aa 1892 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1893 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1894 }
1895 } else {
1896 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1897 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1898 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1899 } else { /* WSTRB */
ecc7b3aa 1900 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1901 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1902 }
1903 }
18c9b560 1904 }
29531141 1905 tcg_temp_free_i32(tmp);
18c9b560 1906 }
7d1b0095 1907 tcg_temp_free_i32(addr);
18c9b560
AZ
1908 return 0;
1909 }
1910
1911 if ((insn & 0x0f000000) != 0x0e000000)
1912 return 1;
1913
1914 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1915 case 0x000: /* WOR */
1916 wrd = (insn >> 12) & 0xf;
1917 rd0 = (insn >> 0) & 0xf;
1918 rd1 = (insn >> 16) & 0xf;
1919 gen_op_iwmmxt_movq_M0_wRn(rd0);
1920 gen_op_iwmmxt_orq_M0_wRn(rd1);
1921 gen_op_iwmmxt_setpsr_nz();
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 gen_op_iwmmxt_set_cup();
1925 break;
1926 case 0x011: /* TMCR */
1927 if (insn & 0xf)
1928 return 1;
1929 rd = (insn >> 12) & 0xf;
1930 wrd = (insn >> 16) & 0xf;
1931 switch (wrd) {
1932 case ARM_IWMMXT_wCID:
1933 case ARM_IWMMXT_wCASF:
1934 break;
1935 case ARM_IWMMXT_wCon:
1936 gen_op_iwmmxt_set_cup();
1937 /* Fall through. */
1938 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1939 tmp = iwmmxt_load_creg(wrd);
1940 tmp2 = load_reg(s, rd);
f669df27 1941 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1942 tcg_temp_free_i32(tmp2);
da6b5335 1943 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1944 break;
1945 case ARM_IWMMXT_wCGR0:
1946 case ARM_IWMMXT_wCGR1:
1947 case ARM_IWMMXT_wCGR2:
1948 case ARM_IWMMXT_wCGR3:
1949 gen_op_iwmmxt_set_cup();
da6b5335
FN
1950 tmp = load_reg(s, rd);
1951 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1952 break;
1953 default:
1954 return 1;
1955 }
1956 break;
1957 case 0x100: /* WXOR */
1958 wrd = (insn >> 12) & 0xf;
1959 rd0 = (insn >> 0) & 0xf;
1960 rd1 = (insn >> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
1962 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1963 gen_op_iwmmxt_setpsr_nz();
1964 gen_op_iwmmxt_movq_wRn_M0(wrd);
1965 gen_op_iwmmxt_set_mup();
1966 gen_op_iwmmxt_set_cup();
1967 break;
1968 case 0x111: /* TMRC */
1969 if (insn & 0xf)
1970 return 1;
1971 rd = (insn >> 12) & 0xf;
1972 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1973 tmp = iwmmxt_load_creg(wrd);
1974 store_reg(s, rd, tmp);
18c9b560
AZ
1975 break;
1976 case 0x300: /* WANDN */
1977 wrd = (insn >> 12) & 0xf;
1978 rd0 = (insn >> 0) & 0xf;
1979 rd1 = (insn >> 16) & 0xf;
1980 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1981 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1982 gen_op_iwmmxt_andq_M0_wRn(rd1);
1983 gen_op_iwmmxt_setpsr_nz();
1984 gen_op_iwmmxt_movq_wRn_M0(wrd);
1985 gen_op_iwmmxt_set_mup();
1986 gen_op_iwmmxt_set_cup();
1987 break;
1988 case 0x200: /* WAND */
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 0) & 0xf;
1991 rd1 = (insn >> 16) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 gen_op_iwmmxt_andq_M0_wRn(rd1);
1994 gen_op_iwmmxt_setpsr_nz();
1995 gen_op_iwmmxt_movq_wRn_M0(wrd);
1996 gen_op_iwmmxt_set_mup();
1997 gen_op_iwmmxt_set_cup();
1998 break;
1999 case 0x810: case 0xa10: /* WMADD */
2000 wrd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 0) & 0xf;
2002 rd1 = (insn >> 16) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 if (insn & (1 << 21))
2005 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2006 else
2007 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2008 gen_op_iwmmxt_movq_wRn_M0(wrd);
2009 gen_op_iwmmxt_set_mup();
2010 break;
2011 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2012 wrd = (insn >> 12) & 0xf;
2013 rd0 = (insn >> 16) & 0xf;
2014 rd1 = (insn >> 0) & 0xf;
2015 gen_op_iwmmxt_movq_M0_wRn(rd0);
2016 switch ((insn >> 22) & 3) {
2017 case 0:
2018 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2019 break;
2020 case 1:
2021 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2022 break;
2023 case 2:
2024 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2025 break;
2026 case 3:
2027 return 1;
2028 }
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2032 break;
2033 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2034 wrd = (insn >> 12) & 0xf;
2035 rd0 = (insn >> 16) & 0xf;
2036 rd1 = (insn >> 0) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2039 case 0:
2040 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2041 break;
2042 case 1:
2043 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2044 break;
2045 case 2:
2046 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2047 break;
2048 case 3:
2049 return 1;
2050 }
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 gen_op_iwmmxt_set_cup();
2054 break;
2055 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 rd1 = (insn >> 0) & 0xf;
2059 gen_op_iwmmxt_movq_M0_wRn(rd0);
2060 if (insn & (1 << 22))
2061 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2062 else
2063 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2064 if (!(insn & (1 << 20)))
2065 gen_op_iwmmxt_addl_M0_wRn(wrd);
2066 gen_op_iwmmxt_movq_wRn_M0(wrd);
2067 gen_op_iwmmxt_set_mup();
2068 break;
2069 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2070 wrd = (insn >> 12) & 0xf;
2071 rd0 = (insn >> 16) & 0xf;
2072 rd1 = (insn >> 0) & 0xf;
2073 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2074 if (insn & (1 << 21)) {
2075 if (insn & (1 << 20))
2076 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2077 else
2078 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2079 } else {
2080 if (insn & (1 << 20))
2081 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2082 else
2083 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2084 }
18c9b560
AZ
2085 gen_op_iwmmxt_movq_wRn_M0(wrd);
2086 gen_op_iwmmxt_set_mup();
2087 break;
2088 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2089 wrd = (insn >> 12) & 0xf;
2090 rd0 = (insn >> 16) & 0xf;
2091 rd1 = (insn >> 0) & 0xf;
2092 gen_op_iwmmxt_movq_M0_wRn(rd0);
2093 if (insn & (1 << 21))
2094 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2095 else
2096 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2097 if (!(insn & (1 << 20))) {
e677137d
PB
2098 iwmmxt_load_reg(cpu_V1, wrd);
2099 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2100 }
2101 gen_op_iwmmxt_movq_wRn_M0(wrd);
2102 gen_op_iwmmxt_set_mup();
2103 break;
2104 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2105 wrd = (insn >> 12) & 0xf;
2106 rd0 = (insn >> 16) & 0xf;
2107 rd1 = (insn >> 0) & 0xf;
2108 gen_op_iwmmxt_movq_M0_wRn(rd0);
2109 switch ((insn >> 22) & 3) {
2110 case 0:
2111 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2112 break;
2113 case 1:
2114 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2115 break;
2116 case 2:
2117 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2118 break;
2119 case 3:
2120 return 1;
2121 }
2122 gen_op_iwmmxt_movq_wRn_M0(wrd);
2123 gen_op_iwmmxt_set_mup();
2124 gen_op_iwmmxt_set_cup();
2125 break;
2126 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2127 wrd = (insn >> 12) & 0xf;
2128 rd0 = (insn >> 16) & 0xf;
2129 rd1 = (insn >> 0) & 0xf;
2130 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2131 if (insn & (1 << 22)) {
2132 if (insn & (1 << 20))
2133 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2134 else
2135 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2136 } else {
2137 if (insn & (1 << 20))
2138 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2141 }
18c9b560
AZ
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 gen_op_iwmmxt_set_cup();
2145 break;
2146 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 rd1 = (insn >> 0) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2151 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2152 tcg_gen_andi_i32(tmp, tmp, 7);
2153 iwmmxt_load_reg(cpu_V1, rd1);
2154 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2155 tcg_temp_free_i32(tmp);
18c9b560
AZ
2156 gen_op_iwmmxt_movq_wRn_M0(wrd);
2157 gen_op_iwmmxt_set_mup();
2158 break;
2159 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2160 if (((insn >> 6) & 3) == 3)
2161 return 1;
18c9b560
AZ
2162 rd = (insn >> 12) & 0xf;
2163 wrd = (insn >> 16) & 0xf;
da6b5335 2164 tmp = load_reg(s, rd);
18c9b560
AZ
2165 gen_op_iwmmxt_movq_M0_wRn(wrd);
2166 switch ((insn >> 6) & 3) {
2167 case 0:
da6b5335
FN
2168 tmp2 = tcg_const_i32(0xff);
2169 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2170 break;
2171 case 1:
da6b5335
FN
2172 tmp2 = tcg_const_i32(0xffff);
2173 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2174 break;
2175 case 2:
da6b5335
FN
2176 tmp2 = tcg_const_i32(0xffffffff);
2177 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2178 break;
da6b5335 2179 default:
f764718d
RH
2180 tmp2 = NULL;
2181 tmp3 = NULL;
18c9b560 2182 }
da6b5335 2183 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2184 tcg_temp_free_i32(tmp3);
2185 tcg_temp_free_i32(tmp2);
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560
AZ
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 break;
2190 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2191 rd = (insn >> 12) & 0xf;
2192 wrd = (insn >> 16) & 0xf;
da6b5335 2193 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2194 return 1;
2195 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2196 tmp = tcg_temp_new_i32();
18c9b560
AZ
2197 switch ((insn >> 22) & 3) {
2198 case 0:
da6b5335 2199 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2200 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2201 if (insn & 8) {
2202 tcg_gen_ext8s_i32(tmp, tmp);
2203 } else {
2204 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2205 }
2206 break;
2207 case 1:
da6b5335 2208 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2209 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2210 if (insn & 8) {
2211 tcg_gen_ext16s_i32(tmp, tmp);
2212 } else {
2213 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2214 }
2215 break;
2216 case 2:
da6b5335 2217 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2218 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2219 break;
18c9b560 2220 }
da6b5335 2221 store_reg(s, rd, tmp);
18c9b560
AZ
2222 break;
2223 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2224 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2225 return 1;
da6b5335 2226 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2227 switch ((insn >> 22) & 3) {
2228 case 0:
da6b5335 2229 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2230 break;
2231 case 1:
da6b5335 2232 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2233 break;
2234 case 2:
da6b5335 2235 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2236 break;
18c9b560 2237 }
da6b5335
FN
2238 tcg_gen_shli_i32(tmp, tmp, 28);
2239 gen_set_nzcv(tmp);
7d1b0095 2240 tcg_temp_free_i32(tmp);
18c9b560
AZ
2241 break;
2242 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2243 if (((insn >> 6) & 3) == 3)
2244 return 1;
18c9b560
AZ
2245 rd = (insn >> 12) & 0xf;
2246 wrd = (insn >> 16) & 0xf;
da6b5335 2247 tmp = load_reg(s, rd);
18c9b560
AZ
2248 switch ((insn >> 6) & 3) {
2249 case 0:
da6b5335 2250 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2251 break;
2252 case 1:
da6b5335 2253 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2254 break;
2255 case 2:
da6b5335 2256 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2257 break;
18c9b560 2258 }
7d1b0095 2259 tcg_temp_free_i32(tmp);
18c9b560
AZ
2260 gen_op_iwmmxt_movq_wRn_M0(wrd);
2261 gen_op_iwmmxt_set_mup();
2262 break;
2263 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2264 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2265 return 1;
da6b5335 2266 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2267 tmp2 = tcg_temp_new_i32();
da6b5335 2268 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2269 switch ((insn >> 22) & 3) {
2270 case 0:
2271 for (i = 0; i < 7; i ++) {
da6b5335
FN
2272 tcg_gen_shli_i32(tmp2, tmp2, 4);
2273 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2274 }
2275 break;
2276 case 1:
2277 for (i = 0; i < 3; i ++) {
da6b5335
FN
2278 tcg_gen_shli_i32(tmp2, tmp2, 8);
2279 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2280 }
2281 break;
2282 case 2:
da6b5335
FN
2283 tcg_gen_shli_i32(tmp2, tmp2, 16);
2284 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2285 break;
18c9b560 2286 }
da6b5335 2287 gen_set_nzcv(tmp);
7d1b0095
PM
2288 tcg_temp_free_i32(tmp2);
2289 tcg_temp_free_i32(tmp);
18c9b560
AZ
2290 break;
2291 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2292 wrd = (insn >> 12) & 0xf;
2293 rd0 = (insn >> 16) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0);
2295 switch ((insn >> 22) & 3) {
2296 case 0:
e677137d 2297 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2298 break;
2299 case 1:
e677137d 2300 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2301 break;
2302 case 2:
e677137d 2303 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2304 break;
2305 case 3:
2306 return 1;
2307 }
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 break;
2311 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2312 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2313 return 1;
da6b5335 2314 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2315 tmp2 = tcg_temp_new_i32();
da6b5335 2316 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2317 switch ((insn >> 22) & 3) {
2318 case 0:
2319 for (i = 0; i < 7; i ++) {
da6b5335
FN
2320 tcg_gen_shli_i32(tmp2, tmp2, 4);
2321 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2322 }
2323 break;
2324 case 1:
2325 for (i = 0; i < 3; i ++) {
da6b5335
FN
2326 tcg_gen_shli_i32(tmp2, tmp2, 8);
2327 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2328 }
2329 break;
2330 case 2:
da6b5335
FN
2331 tcg_gen_shli_i32(tmp2, tmp2, 16);
2332 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2333 break;
18c9b560 2334 }
da6b5335 2335 gen_set_nzcv(tmp);
7d1b0095
PM
2336 tcg_temp_free_i32(tmp2);
2337 tcg_temp_free_i32(tmp);
18c9b560
AZ
2338 break;
2339 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2340 rd = (insn >> 12) & 0xf;
2341 rd0 = (insn >> 16) & 0xf;
da6b5335 2342 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2343 return 1;
2344 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2345 tmp = tcg_temp_new_i32();
18c9b560
AZ
2346 switch ((insn >> 22) & 3) {
2347 case 0:
da6b5335 2348 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2349 break;
2350 case 1:
da6b5335 2351 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2352 break;
2353 case 2:
da6b5335 2354 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2355 break;
18c9b560 2356 }
da6b5335 2357 store_reg(s, rd, tmp);
18c9b560
AZ
2358 break;
2359 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2360 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2361 wrd = (insn >> 12) & 0xf;
2362 rd0 = (insn >> 16) & 0xf;
2363 rd1 = (insn >> 0) & 0xf;
2364 gen_op_iwmmxt_movq_M0_wRn(rd0);
2365 switch ((insn >> 22) & 3) {
2366 case 0:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2371 break;
2372 case 1:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2377 break;
2378 case 2:
2379 if (insn & (1 << 21))
2380 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2381 else
2382 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2383 break;
2384 case 3:
2385 return 1;
2386 }
2387 gen_op_iwmmxt_movq_wRn_M0(wrd);
2388 gen_op_iwmmxt_set_mup();
2389 gen_op_iwmmxt_set_cup();
2390 break;
2391 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2392 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 switch ((insn >> 22) & 3) {
2397 case 0:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_unpacklsb_M0();
2400 else
2401 gen_op_iwmmxt_unpacklub_M0();
2402 break;
2403 case 1:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_unpacklsw_M0();
2406 else
2407 gen_op_iwmmxt_unpackluw_M0();
2408 break;
2409 case 2:
2410 if (insn & (1 << 21))
2411 gen_op_iwmmxt_unpacklsl_M0();
2412 else
2413 gen_op_iwmmxt_unpacklul_M0();
2414 break;
2415 case 3:
2416 return 1;
2417 }
2418 gen_op_iwmmxt_movq_wRn_M0(wrd);
2419 gen_op_iwmmxt_set_mup();
2420 gen_op_iwmmxt_set_cup();
2421 break;
2422 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2423 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2424 wrd = (insn >> 12) & 0xf;
2425 rd0 = (insn >> 16) & 0xf;
2426 gen_op_iwmmxt_movq_M0_wRn(rd0);
2427 switch ((insn >> 22) & 3) {
2428 case 0:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpackhsb_M0();
2431 else
2432 gen_op_iwmmxt_unpackhub_M0();
2433 break;
2434 case 1:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_unpackhsw_M0();
2437 else
2438 gen_op_iwmmxt_unpackhuw_M0();
2439 break;
2440 case 2:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_unpackhsl_M0();
2443 else
2444 gen_op_iwmmxt_unpackhul_M0();
2445 break;
2446 case 3:
2447 return 1;
2448 }
2449 gen_op_iwmmxt_movq_wRn_M0(wrd);
2450 gen_op_iwmmxt_set_mup();
2451 gen_op_iwmmxt_set_cup();
2452 break;
2453 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2454 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2455 if (((insn >> 22) & 3) == 0)
2456 return 1;
18c9b560
AZ
2457 wrd = (insn >> 12) & 0xf;
2458 rd0 = (insn >> 16) & 0xf;
2459 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2460 tmp = tcg_temp_new_i32();
da6b5335 2461 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2462 tcg_temp_free_i32(tmp);
18c9b560 2463 return 1;
da6b5335 2464 }
18c9b560 2465 switch ((insn >> 22) & 3) {
18c9b560 2466 case 1:
477955bd 2467 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2468 break;
2469 case 2:
477955bd 2470 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2471 break;
2472 case 3:
477955bd 2473 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2474 break;
2475 }
7d1b0095 2476 tcg_temp_free_i32(tmp);
18c9b560
AZ
2477 gen_op_iwmmxt_movq_wRn_M0(wrd);
2478 gen_op_iwmmxt_set_mup();
2479 gen_op_iwmmxt_set_cup();
2480 break;
2481 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2482 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2483 if (((insn >> 22) & 3) == 0)
2484 return 1;
18c9b560
AZ
2485 wrd = (insn >> 12) & 0xf;
2486 rd0 = (insn >> 16) & 0xf;
2487 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2488 tmp = tcg_temp_new_i32();
da6b5335 2489 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2490 tcg_temp_free_i32(tmp);
18c9b560 2491 return 1;
da6b5335 2492 }
18c9b560 2493 switch ((insn >> 22) & 3) {
18c9b560 2494 case 1:
477955bd 2495 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2496 break;
2497 case 2:
477955bd 2498 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2499 break;
2500 case 3:
477955bd 2501 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2502 break;
2503 }
7d1b0095 2504 tcg_temp_free_i32(tmp);
18c9b560
AZ
2505 gen_op_iwmmxt_movq_wRn_M0(wrd);
2506 gen_op_iwmmxt_set_mup();
2507 gen_op_iwmmxt_set_cup();
2508 break;
2509 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2510 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2511 if (((insn >> 22) & 3) == 0)
2512 return 1;
18c9b560
AZ
2513 wrd = (insn >> 12) & 0xf;
2514 rd0 = (insn >> 16) & 0xf;
2515 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2516 tmp = tcg_temp_new_i32();
da6b5335 2517 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2518 tcg_temp_free_i32(tmp);
18c9b560 2519 return 1;
da6b5335 2520 }
18c9b560 2521 switch ((insn >> 22) & 3) {
18c9b560 2522 case 1:
477955bd 2523 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2524 break;
2525 case 2:
477955bd 2526 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2527 break;
2528 case 3:
477955bd 2529 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2530 break;
2531 }
7d1b0095 2532 tcg_temp_free_i32(tmp);
18c9b560
AZ
2533 gen_op_iwmmxt_movq_wRn_M0(wrd);
2534 gen_op_iwmmxt_set_mup();
2535 gen_op_iwmmxt_set_cup();
2536 break;
2537 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2538 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2539 if (((insn >> 22) & 3) == 0)
2540 return 1;
18c9b560
AZ
2541 wrd = (insn >> 12) & 0xf;
2542 rd0 = (insn >> 16) & 0xf;
2543 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2544 tmp = tcg_temp_new_i32();
18c9b560 2545 switch ((insn >> 22) & 3) {
18c9b560 2546 case 1:
da6b5335 2547 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2548 tcg_temp_free_i32(tmp);
18c9b560 2549 return 1;
da6b5335 2550 }
477955bd 2551 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2552 break;
2553 case 2:
da6b5335 2554 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2555 tcg_temp_free_i32(tmp);
18c9b560 2556 return 1;
da6b5335 2557 }
477955bd 2558 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2559 break;
2560 case 3:
da6b5335 2561 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2562 tcg_temp_free_i32(tmp);
18c9b560 2563 return 1;
da6b5335 2564 }
477955bd 2565 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2566 break;
2567 }
7d1b0095 2568 tcg_temp_free_i32(tmp);
18c9b560
AZ
2569 gen_op_iwmmxt_movq_wRn_M0(wrd);
2570 gen_op_iwmmxt_set_mup();
2571 gen_op_iwmmxt_set_cup();
2572 break;
2573 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2574 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2575 wrd = (insn >> 12) & 0xf;
2576 rd0 = (insn >> 16) & 0xf;
2577 rd1 = (insn >> 0) & 0xf;
2578 gen_op_iwmmxt_movq_M0_wRn(rd0);
2579 switch ((insn >> 22) & 3) {
2580 case 0:
2581 if (insn & (1 << 21))
2582 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2583 else
2584 gen_op_iwmmxt_minub_M0_wRn(rd1);
2585 break;
2586 case 1:
2587 if (insn & (1 << 21))
2588 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2589 else
2590 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2591 break;
2592 case 2:
2593 if (insn & (1 << 21))
2594 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2595 else
2596 gen_op_iwmmxt_minul_M0_wRn(rd1);
2597 break;
2598 case 3:
2599 return 1;
2600 }
2601 gen_op_iwmmxt_movq_wRn_M0(wrd);
2602 gen_op_iwmmxt_set_mup();
2603 break;
2604 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2605 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
2610 switch ((insn >> 22) & 3) {
2611 case 0:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2616 break;
2617 case 1:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2622 break;
2623 case 2:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2628 break;
2629 case 3:
2630 return 1;
2631 }
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 break;
2635 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2636 case 0x402: case 0x502: case 0x602: case 0x702:
2637 wrd = (insn >> 12) & 0xf;
2638 rd0 = (insn >> 16) & 0xf;
2639 rd1 = (insn >> 0) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2641 tmp = tcg_const_i32((insn >> 20) & 3);
2642 iwmmxt_load_reg(cpu_V1, rd1);
2643 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2644 tcg_temp_free_i32(tmp);
18c9b560
AZ
2645 gen_op_iwmmxt_movq_wRn_M0(wrd);
2646 gen_op_iwmmxt_set_mup();
2647 break;
2648 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2649 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2650 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2651 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2652 wrd = (insn >> 12) & 0xf;
2653 rd0 = (insn >> 16) & 0xf;
2654 rd1 = (insn >> 0) & 0xf;
2655 gen_op_iwmmxt_movq_M0_wRn(rd0);
2656 switch ((insn >> 20) & 0xf) {
2657 case 0x0:
2658 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2659 break;
2660 case 0x1:
2661 gen_op_iwmmxt_subub_M0_wRn(rd1);
2662 break;
2663 case 0x3:
2664 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2665 break;
2666 case 0x4:
2667 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2668 break;
2669 case 0x5:
2670 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2671 break;
2672 case 0x7:
2673 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2674 break;
2675 case 0x8:
2676 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2677 break;
2678 case 0x9:
2679 gen_op_iwmmxt_subul_M0_wRn(rd1);
2680 break;
2681 case 0xb:
2682 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2683 break;
2684 default:
2685 return 1;
2686 }
2687 gen_op_iwmmxt_movq_wRn_M0(wrd);
2688 gen_op_iwmmxt_set_mup();
2689 gen_op_iwmmxt_set_cup();
2690 break;
2691 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2692 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2693 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2694 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2695 wrd = (insn >> 12) & 0xf;
2696 rd0 = (insn >> 16) & 0xf;
2697 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2698 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2699 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2700 tcg_temp_free_i32(tmp);
18c9b560
AZ
2701 gen_op_iwmmxt_movq_wRn_M0(wrd);
2702 gen_op_iwmmxt_set_mup();
2703 gen_op_iwmmxt_set_cup();
2704 break;
2705 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2706 case 0x418: case 0x518: case 0x618: case 0x718:
2707 case 0x818: case 0x918: case 0xa18: case 0xb18:
2708 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2709 wrd = (insn >> 12) & 0xf;
2710 rd0 = (insn >> 16) & 0xf;
2711 rd1 = (insn >> 0) & 0xf;
2712 gen_op_iwmmxt_movq_M0_wRn(rd0);
2713 switch ((insn >> 20) & 0xf) {
2714 case 0x0:
2715 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2716 break;
2717 case 0x1:
2718 gen_op_iwmmxt_addub_M0_wRn(rd1);
2719 break;
2720 case 0x3:
2721 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2722 break;
2723 case 0x4:
2724 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2725 break;
2726 case 0x5:
2727 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2728 break;
2729 case 0x7:
2730 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2731 break;
2732 case 0x8:
2733 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2734 break;
2735 case 0x9:
2736 gen_op_iwmmxt_addul_M0_wRn(rd1);
2737 break;
2738 case 0xb:
2739 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2740 break;
2741 default:
2742 return 1;
2743 }
2744 gen_op_iwmmxt_movq_wRn_M0(wrd);
2745 gen_op_iwmmxt_set_mup();
2746 gen_op_iwmmxt_set_cup();
2747 break;
2748 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2749 case 0x408: case 0x508: case 0x608: case 0x708:
2750 case 0x808: case 0x908: case 0xa08: case 0xb08:
2751 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2752 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2753 return 1;
18c9b560
AZ
2754 wrd = (insn >> 12) & 0xf;
2755 rd0 = (insn >> 16) & 0xf;
2756 rd1 = (insn >> 0) & 0xf;
2757 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2758 switch ((insn >> 22) & 3) {
18c9b560
AZ
2759 case 1:
2760 if (insn & (1 << 21))
2761 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2762 else
2763 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2764 break;
2765 case 2:
2766 if (insn & (1 << 21))
2767 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2768 else
2769 gen_op_iwmmxt_packul_M0_wRn(rd1);
2770 break;
2771 case 3:
2772 if (insn & (1 << 21))
2773 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2774 else
2775 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2776 break;
2777 }
2778 gen_op_iwmmxt_movq_wRn_M0(wrd);
2779 gen_op_iwmmxt_set_mup();
2780 gen_op_iwmmxt_set_cup();
2781 break;
2782 case 0x201: case 0x203: case 0x205: case 0x207:
2783 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2784 case 0x211: case 0x213: case 0x215: case 0x217:
2785 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2786 wrd = (insn >> 5) & 0xf;
2787 rd0 = (insn >> 12) & 0xf;
2788 rd1 = (insn >> 0) & 0xf;
2789 if (rd0 == 0xf || rd1 == 0xf)
2790 return 1;
2791 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2792 tmp = load_reg(s, rd0);
2793 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2794 switch ((insn >> 16) & 0xf) {
2795 case 0x0: /* TMIA */
da6b5335 2796 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2797 break;
2798 case 0x8: /* TMIAPH */
da6b5335 2799 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2800 break;
2801 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2802 if (insn & (1 << 16))
da6b5335 2803 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2804 if (insn & (1 << 17))
da6b5335
FN
2805 tcg_gen_shri_i32(tmp2, tmp2, 16);
2806 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2807 break;
2808 default:
7d1b0095
PM
2809 tcg_temp_free_i32(tmp2);
2810 tcg_temp_free_i32(tmp);
18c9b560
AZ
2811 return 1;
2812 }
7d1b0095
PM
2813 tcg_temp_free_i32(tmp2);
2814 tcg_temp_free_i32(tmp);
18c9b560
AZ
2815 gen_op_iwmmxt_movq_wRn_M0(wrd);
2816 gen_op_iwmmxt_set_mup();
2817 break;
2818 default:
2819 return 1;
2820 }
2821
2822 return 0;
2823}
2824
a1c7273b 2825/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2826 (ie. an undefined instruction). */
7dcc1f89 2827static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2828{
2829 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2830 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2831
2832 if ((insn & 0x0ff00f10) == 0x0e200010) {
2833 /* Multiply with Internal Accumulate Format */
2834 rd0 = (insn >> 12) & 0xf;
2835 rd1 = insn & 0xf;
2836 acc = (insn >> 5) & 7;
2837
2838 if (acc != 0)
2839 return 1;
2840
3a554c0f
FN
2841 tmp = load_reg(s, rd0);
2842 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2843 switch ((insn >> 16) & 0xf) {
2844 case 0x0: /* MIA */
3a554c0f 2845 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2846 break;
2847 case 0x8: /* MIAPH */
3a554c0f 2848 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2849 break;
2850 case 0xc: /* MIABB */
2851 case 0xd: /* MIABT */
2852 case 0xe: /* MIATB */
2853 case 0xf: /* MIATT */
18c9b560 2854 if (insn & (1 << 16))
3a554c0f 2855 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2856 if (insn & (1 << 17))
3a554c0f
FN
2857 tcg_gen_shri_i32(tmp2, tmp2, 16);
2858 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2859 break;
2860 default:
2861 return 1;
2862 }
7d1b0095
PM
2863 tcg_temp_free_i32(tmp2);
2864 tcg_temp_free_i32(tmp);
18c9b560
AZ
2865
2866 gen_op_iwmmxt_movq_wRn_M0(acc);
2867 return 0;
2868 }
2869
2870 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2871 /* Internal Accumulator Access Format */
2872 rdhi = (insn >> 16) & 0xf;
2873 rdlo = (insn >> 12) & 0xf;
2874 acc = insn & 7;
2875
2876 if (acc != 0)
2877 return 1;
2878
2879 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2880 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2881 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2882 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2883 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2884 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2885 } else { /* MAR */
3a554c0f
FN
2886 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2887 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2888 }
2889 return 0;
2890 }
2891
2892 return 1;
2893}
2894
/* Helpers for extracting VFP register numbers from an instruction.
 *
 * VFP_REG_SHR shifts X right by N bits, or left by -N bits when N is
 * negative (used so the same macro works for fields on either side of
 * the target bit position).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register number: 4-bit field at BIGBIT plus the
 * low "extra" bit at SMALLBIT, giving a 5-bit register index.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number.  With VFP3 (32 D registers) the
 * SMALLBIT becomes the high bit of a 5-bit index; without VFP3 a set
 * SMALLBIT is an encoding error and the macro returns 1 (UNDEF) from
 * the enclosing function.  NOTE: this macro can 'return' -- only use
 * it inside functions following the 0/1 decode convention.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2914
4373f3ce 2915/* Move between integer and VFP cores. */
39d5492a 2916static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2917{
39d5492a 2918 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2919 tcg_gen_mov_i32(tmp, cpu_F0s);
2920 return tmp;
2921}
2922
39d5492a 2923static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2924{
2925 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2926 tcg_temp_free_i32(tmp);
4373f3ce
PB
2927}
2928
39d5492a 2929static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2930{
39d5492a 2931 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2932 if (shift)
2933 tcg_gen_shri_i32(var, var, shift);
86831435 2934 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2935 tcg_gen_shli_i32(tmp, var, 8);
2936 tcg_gen_or_i32(var, var, tmp);
2937 tcg_gen_shli_i32(tmp, var, 16);
2938 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2939 tcg_temp_free_i32(tmp);
ad69471c
PB
2940}
2941
39d5492a 2942static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2943{
39d5492a 2944 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2945 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2946 tcg_gen_shli_i32(tmp, var, 16);
2947 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2948 tcg_temp_free_i32(tmp);
ad69471c
PB
2949}
2950
39d5492a 2951static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2952{
39d5492a 2953 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2954 tcg_gen_andi_i32(var, var, 0xffff0000);
2955 tcg_gen_shri_i32(tmp, var, 16);
2956 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2957 tcg_temp_free_i32(tmp);
ad69471c
PB
2958}
2959
39d5492a 2960static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2961{
2962 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2963 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2964 switch (size) {
2965 case 0:
12dcc321 2966 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2967 gen_neon_dup_u8(tmp, 0);
2968 break;
2969 case 1:
12dcc321 2970 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2971 gen_neon_dup_low16(tmp);
2972 break;
2973 case 2:
12dcc321 2974 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2975 break;
2976 default: /* Avoid compiler warnings. */
2977 abort();
2978 }
2979 return tmp;
2980}
2981
/* Handle the v8 VSEL instruction: write Dd/Sd := Dn/Sn if the condition
 * encoded in insn[21:20] holds (EQ, VS, GE or GT), else Dd/Sd := Dm/Sm.
 * Emitted entirely as branchless movcond ops on the cached flag values.
 * Always returns 0 (the encoding is already validated by the caller).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag words to 64 bits so the movconds below can
         * operate on i64 values: ZF is compared for equality with zero so
         * zero-extension suffices, but NF and VF are tested via their sign
         * bit (LT/GE against zero) and therefore must be sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            /* cpu_ZF holds a value that is zero iff the Z flag is set. */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: select frn only when Z is clear,
             * then override with frm again unless N == V.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: same selection logic, but the 32-bit flag
         * values can be used directly without widening.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3090
40cfacdd
WN
3091static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3092 uint32_t rm, uint32_t dp)
3093{
3094 uint32_t vmin = extract32(insn, 6, 1);
3095 TCGv_ptr fpst = get_fpstatus_ptr(0);
3096
3097 if (dp) {
3098 TCGv_i64 frn, frm, dest;
3099
3100 frn = tcg_temp_new_i64();
3101 frm = tcg_temp_new_i64();
3102 dest = tcg_temp_new_i64();
3103
3104 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3105 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3106 if (vmin) {
f71a2ae5 3107 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3108 } else {
f71a2ae5 3109 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3110 }
3111 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3112 tcg_temp_free_i64(frn);
3113 tcg_temp_free_i64(frm);
3114 tcg_temp_free_i64(dest);
3115 } else {
3116 TCGv_i32 frn, frm, dest;
3117
3118 frn = tcg_temp_new_i32();
3119 frm = tcg_temp_new_i32();
3120 dest = tcg_temp_new_i32();
3121
3122 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3123 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3124 if (vmin) {
f71a2ae5 3125 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3126 } else {
f71a2ae5 3127 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3128 }
3129 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3130 tcg_temp_free_i32(frn);
3131 tcg_temp_free_i32(frm);
3132 tcg_temp_free_i32(dest);
3133 }
3134
3135 tcg_temp_free_ptr(fpst);
3136 return 0;
3137}
3138
/* Handle the v8 VRINT{A,N,P,M} instructions: round Dm/Sm to an integral
 * value (still in floating-point format) into Dd/Sd, using the explicitly
 * supplied @rounding mode rather than the one currently in the FPSCR.
 * Always returns 0 (the encoding is already validated by the caller).
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* Temporarily install the requested rounding mode; set_rmode swaps,
     * leaving the previous mode in tcg_rmode so it can be restored below.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* Swap the saved rounding mode back into the FP status. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
3176
/* Handle the v8 VCVT{A,N,P,M} instructions: convert Dm/Sm to a 32-bit
 * integer in Sd using the explicitly supplied @rounding mode (instead of
 * the FPSCR mode).  insn bit 7 selects signed (1) vs unsigned (0) result.
 * Always returns 0 (the encoding is already validated by the caller).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Plain integer conversion: zero fixed-point fraction bits. */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily install the requested rounding mode; set_rmode swaps,
     * leaving the previous mode in tcg_rmode so it can be restored below.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper yields a 64-bit result; only the low 32 bits are
         * significant, and they are stored to the single-precision Sd.
         */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Swap the saved rounding mode back into the FP status. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3234
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field of the instruction.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY, /* RM = 0b00 */
    FPROUNDING_TIEEVEN, /* RM = 0b01 */
    FPROUNDING_POSINF,  /* RM = 0b10 */
    FPROUNDING_NEGINF,  /* RM = 0b11 */
};
3245
/* Decode the VFP instructions new in v8 (VSEL, VMAXNM/VMINNM, VRINT*,
 * VCVT* with explicit rounding mode), which live in the unconditional/
 * T=1 encoding space.  Returns nonzero for an invalid encoding (UNDEF),
 * zero on success.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    /* Bit 8 distinguishes double (1) from single (0) precision. */
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Extract the three register fields; the D-register macros may
     * return 1 (UNDEF) directly for invalid pre-VFP3 encodings.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    /* Dispatch on mask/value pairs over the fixed encoding bits. */
    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3279
a1c7273b 3280/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3281 (ie. an undefined instruction). */
7dcc1f89 3282static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3283{
3284 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3285 int dp, veclen;
39d5492a
PM
3286 TCGv_i32 addr;
3287 TCGv_i32 tmp;
3288 TCGv_i32 tmp2;
b7bcbe95 3289
d614a513 3290 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3291 return 1;
d614a513 3292 }
40f137e1 3293
2c7ffc41
PM
3294 /* FIXME: this access check should not take precedence over UNDEF
3295 * for invalid encodings; we will generate incorrect syndrome information
3296 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3297 */
9dbbc748 3298 if (s->fp_excp_el) {
2c7ffc41 3299 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3300 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3301 return 0;
3302 }
3303
5df8bac1 3304 if (!s->vfp_enabled) {
9ee6e8bb 3305 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3306 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3307 return 1;
3308 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3309 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3310 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3311 return 1;
a50c0f51 3312 }
40f137e1 3313 }
6a57f3eb
WN
3314
3315 if (extract32(insn, 28, 4) == 0xf) {
3316 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3317 * only used in v8 and above.
3318 */
7dcc1f89 3319 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3320 }
3321
b7bcbe95
FB
3322 dp = ((insn & 0xf00) == 0xb00);
3323 switch ((insn >> 24) & 0xf) {
3324 case 0xe:
3325 if (insn & (1 << 4)) {
3326 /* single register transfer */
b7bcbe95
FB
3327 rd = (insn >> 12) & 0xf;
3328 if (dp) {
9ee6e8bb
PB
3329 int size;
3330 int pass;
3331
3332 VFP_DREG_N(rn, insn);
3333 if (insn & 0xf)
b7bcbe95 3334 return 1;
9ee6e8bb 3335 if (insn & 0x00c00060
d614a513 3336 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3337 return 1;
d614a513 3338 }
9ee6e8bb
PB
3339
3340 pass = (insn >> 21) & 1;
3341 if (insn & (1 << 22)) {
3342 size = 0;
3343 offset = ((insn >> 5) & 3) * 8;
3344 } else if (insn & (1 << 5)) {
3345 size = 1;
3346 offset = (insn & (1 << 6)) ? 16 : 0;
3347 } else {
3348 size = 2;
3349 offset = 0;
3350 }
18c9b560 3351 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3352 /* vfp->arm */
ad69471c 3353 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3354 switch (size) {
3355 case 0:
9ee6e8bb 3356 if (offset)
ad69471c 3357 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3358 if (insn & (1 << 23))
ad69471c 3359 gen_uxtb(tmp);
9ee6e8bb 3360 else
ad69471c 3361 gen_sxtb(tmp);
9ee6e8bb
PB
3362 break;
3363 case 1:
9ee6e8bb
PB
3364 if (insn & (1 << 23)) {
3365 if (offset) {
ad69471c 3366 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3367 } else {
ad69471c 3368 gen_uxth(tmp);
9ee6e8bb
PB
3369 }
3370 } else {
3371 if (offset) {
ad69471c 3372 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3373 } else {
ad69471c 3374 gen_sxth(tmp);
9ee6e8bb
PB
3375 }
3376 }
3377 break;
3378 case 2:
9ee6e8bb
PB
3379 break;
3380 }
ad69471c 3381 store_reg(s, rd, tmp);
b7bcbe95
FB
3382 } else {
3383 /* arm->vfp */
ad69471c 3384 tmp = load_reg(s, rd);
9ee6e8bb
PB
3385 if (insn & (1 << 23)) {
3386 /* VDUP */
3387 if (size == 0) {
ad69471c 3388 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3389 } else if (size == 1) {
ad69471c 3390 gen_neon_dup_low16(tmp);
9ee6e8bb 3391 }
cbbccffc 3392 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3393 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3394 tcg_gen_mov_i32(tmp2, tmp);
3395 neon_store_reg(rn, n, tmp2);
3396 }
3397 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3398 } else {
3399 /* VMOV */
3400 switch (size) {
3401 case 0:
ad69471c 3402 tmp2 = neon_load_reg(rn, pass);
d593c48e 3403 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3404 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3405 break;
3406 case 1:
ad69471c 3407 tmp2 = neon_load_reg(rn, pass);
d593c48e 3408 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3409 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3410 break;
3411 case 2:
9ee6e8bb
PB
3412 break;
3413 }
ad69471c 3414 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3415 }
b7bcbe95 3416 }
9ee6e8bb
PB
3417 } else { /* !dp */
3418 if ((insn & 0x6f) != 0x00)
3419 return 1;
3420 rn = VFP_SREG_N(insn);
18c9b560 3421 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3422 /* vfp->arm */
3423 if (insn & (1 << 21)) {
3424 /* system register */
40f137e1 3425 rn >>= 1;
9ee6e8bb 3426
b7bcbe95 3427 switch (rn) {
40f137e1 3428 case ARM_VFP_FPSID:
4373f3ce 3429 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3430 VFP3 restricts all id registers to privileged
3431 accesses. */
3432 if (IS_USER(s)
d614a513 3433 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3434 return 1;
d614a513 3435 }
4373f3ce 3436 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3437 break;
40f137e1 3438 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3439 if (IS_USER(s))
3440 return 1;
4373f3ce 3441 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3442 break;
40f137e1
PB
3443 case ARM_VFP_FPINST:
3444 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3445 /* Not present in VFP3. */
3446 if (IS_USER(s)
d614a513 3447 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3448 return 1;
d614a513 3449 }
4373f3ce 3450 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3451 break;
40f137e1 3452 case ARM_VFP_FPSCR:
601d70b9 3453 if (rd == 15) {
4373f3ce
PB
3454 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3455 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3456 } else {
7d1b0095 3457 tmp = tcg_temp_new_i32();
4373f3ce
PB
3458 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3459 }
b7bcbe95 3460 break;
a50c0f51 3461 case ARM_VFP_MVFR2:
d614a513 3462 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3463 return 1;
3464 }
3465 /* fall through */
9ee6e8bb
PB
3466 case ARM_VFP_MVFR0:
3467 case ARM_VFP_MVFR1:
3468 if (IS_USER(s)
d614a513 3469 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3470 return 1;
d614a513 3471 }
4373f3ce 3472 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3473 break;
b7bcbe95
FB
3474 default:
3475 return 1;
3476 }
3477 } else {
3478 gen_mov_F0_vreg(0, rn);
4373f3ce 3479 tmp = gen_vfp_mrs();
b7bcbe95
FB
3480 }
3481 if (rd == 15) {
b5ff1b31 3482 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3483 gen_set_nzcv(tmp);
7d1b0095 3484 tcg_temp_free_i32(tmp);
4373f3ce
PB
3485 } else {
3486 store_reg(s, rd, tmp);
3487 }
b7bcbe95
FB
3488 } else {
3489 /* arm->vfp */
b7bcbe95 3490 if (insn & (1 << 21)) {
40f137e1 3491 rn >>= 1;
b7bcbe95
FB
3492 /* system register */
3493 switch (rn) {
40f137e1 3494 case ARM_VFP_FPSID:
9ee6e8bb
PB
3495 case ARM_VFP_MVFR0:
3496 case ARM_VFP_MVFR1:
b7bcbe95
FB
3497 /* Writes are ignored. */
3498 break;
40f137e1 3499 case ARM_VFP_FPSCR:
e4c1cfa5 3500 tmp = load_reg(s, rd);
4373f3ce 3501 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3502 tcg_temp_free_i32(tmp);
b5ff1b31 3503 gen_lookup_tb(s);
b7bcbe95 3504 break;
40f137e1 3505 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3506 if (IS_USER(s))
3507 return 1;
71b3c3de
JR
3508 /* TODO: VFP subarchitecture support.
3509 * For now, keep the EN bit only */
e4c1cfa5 3510 tmp = load_reg(s, rd);
71b3c3de 3511 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3512 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3513 gen_lookup_tb(s);
3514 break;
3515 case ARM_VFP_FPINST:
3516 case ARM_VFP_FPINST2:
23adb861
PM
3517 if (IS_USER(s)) {
3518 return 1;
3519 }
e4c1cfa5 3520 tmp = load_reg(s, rd);
4373f3ce 3521 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3522 break;
b7bcbe95
FB
3523 default:
3524 return 1;
3525 }
3526 } else {
e4c1cfa5 3527 tmp = load_reg(s, rd);
4373f3ce 3528 gen_vfp_msr(tmp);
b7bcbe95
FB
3529 gen_mov_vreg_F0(0, rn);
3530 }
3531 }
3532 }
3533 } else {
3534 /* data processing */
3535 /* The opcode is in bits 23, 21, 20 and 6. */
3536 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3537 if (dp) {
3538 if (op == 15) {
3539 /* rn is opcode */
3540 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3541 } else {
3542 /* rn is register number */
9ee6e8bb 3543 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3544 }
3545
239c20c7
WN
3546 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3547 ((rn & 0x1e) == 0x6))) {
3548 /* Integer or single/half precision destination. */
9ee6e8bb 3549 rd = VFP_SREG_D(insn);
b7bcbe95 3550 } else {
9ee6e8bb 3551 VFP_DREG_D(rd, insn);
b7bcbe95 3552 }
04595bf6 3553 if (op == 15 &&
239c20c7
WN
3554 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3555 ((rn & 0x1e) == 0x4))) {
3556 /* VCVT from int or half precision is always from S reg
3557 * regardless of dp bit. VCVT with immediate frac_bits
3558 * has same format as SREG_M.
04595bf6
PM
3559 */
3560 rm = VFP_SREG_M(insn);
b7bcbe95 3561 } else {
9ee6e8bb 3562 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3563 }
3564 } else {
9ee6e8bb 3565 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3566 if (op == 15 && rn == 15) {
3567 /* Double precision destination. */
9ee6e8bb
PB
3568 VFP_DREG_D(rd, insn);
3569 } else {
3570 rd = VFP_SREG_D(insn);
3571 }
04595bf6
PM
3572 /* NB that we implicitly rely on the encoding for the frac_bits
3573 * in VCVT of fixed to float being the same as that of an SREG_M
3574 */
9ee6e8bb 3575 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3576 }
3577
69d1fc22 3578 veclen = s->vec_len;
b7bcbe95
FB
3579 if (op == 15 && rn > 3)
3580 veclen = 0;
3581
3582 /* Shut up compiler warnings. */
3583 delta_m = 0;
3584 delta_d = 0;
3585 bank_mask = 0;
3b46e624 3586
b7bcbe95
FB
3587 if (veclen > 0) {
3588 if (dp)
3589 bank_mask = 0xc;
3590 else
3591 bank_mask = 0x18;
3592
3593 /* Figure out what type of vector operation this is. */
3594 if ((rd & bank_mask) == 0) {
3595 /* scalar */
3596 veclen = 0;
3597 } else {
3598 if (dp)
69d1fc22 3599 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3600 else
69d1fc22 3601 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3602
3603 if ((rm & bank_mask) == 0) {
3604 /* mixed scalar/vector */
3605 delta_m = 0;
3606 } else {
3607 /* vector */
3608 delta_m = delta_d;
3609 }
3610 }
3611 }
3612
3613 /* Load the initial operands. */
3614 if (op == 15) {
3615 switch (rn) {
3616 case 16:
3617 case 17:
3618 /* Integer source */
3619 gen_mov_F0_vreg(0, rm);
3620 break;
3621 case 8:
3622 case 9:
3623 /* Compare */
3624 gen_mov_F0_vreg(dp, rd);
3625 gen_mov_F1_vreg(dp, rm);
3626 break;
3627 case 10:
3628 case 11:
3629 /* Compare with zero */
3630 gen_mov_F0_vreg(dp, rd);
3631 gen_vfp_F1_ld0(dp);
3632 break;
9ee6e8bb
PB
3633 case 20:
3634 case 21:
3635 case 22:
3636 case 23:
644ad806
PB
3637 case 28:
3638 case 29:
3639 case 30:
3640 case 31:
9ee6e8bb
PB
3641 /* Source and destination the same. */
3642 gen_mov_F0_vreg(dp, rd);
3643 break;
6e0c0ed1
PM
3644 case 4:
3645 case 5:
3646 case 6:
3647 case 7:
239c20c7
WN
3648 /* VCVTB, VCVTT: only present with the halfprec extension
3649 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3650 * (we choose to UNDEF)
6e0c0ed1 3651 */
d614a513
PM
3652 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3653 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3654 return 1;
3655 }
239c20c7
WN
3656 if (!extract32(rn, 1, 1)) {
3657 /* Half precision source. */
3658 gen_mov_F0_vreg(0, rm);
3659 break;
3660 }
6e0c0ed1 3661 /* Otherwise fall through */
b7bcbe95
FB
3662 default:
3663 /* One source operand. */
3664 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3665 break;
b7bcbe95
FB
3666 }
3667 } else {
3668 /* Two source operands. */
3669 gen_mov_F0_vreg(dp, rn);
3670 gen_mov_F1_vreg(dp, rm);
3671 }
3672
3673 for (;;) {
3674 /* Perform the calculation. */
3675 switch (op) {
605a6aed
PM
3676 case 0: /* VMLA: fd + (fn * fm) */
3677 /* Note that order of inputs to the add matters for NaNs */
3678 gen_vfp_F1_mul(dp);
3679 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3680 gen_vfp_add(dp);
3681 break;
605a6aed 3682 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3683 gen_vfp_mul(dp);
605a6aed
PM
3684 gen_vfp_F1_neg(dp);
3685 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3686 gen_vfp_add(dp);
3687 break;
605a6aed
PM
3688 case 2: /* VNMLS: -fd + (fn * fm) */
3689 /* Note that it isn't valid to replace (-A + B) with (B - A)
3690 * or similar plausible looking simplifications
3691 * because this will give wrong results for NaNs.
3692 */
3693 gen_vfp_F1_mul(dp);
3694 gen_mov_F0_vreg(dp, rd);
3695 gen_vfp_neg(dp);
3696 gen_vfp_add(dp);
b7bcbe95 3697 break;
605a6aed 3698 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3699 gen_vfp_mul(dp);
605a6aed
PM
3700 gen_vfp_F1_neg(dp);
3701 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3702 gen_vfp_neg(dp);
605a6aed 3703 gen_vfp_add(dp);
b7bcbe95
FB
3704 break;
3705 case 4: /* mul: fn * fm */
3706 gen_vfp_mul(dp);
3707 break;
3708 case 5: /* nmul: -(fn * fm) */
3709 gen_vfp_mul(dp);
3710 gen_vfp_neg(dp);
3711 break;
3712 case 6: /* add: fn + fm */
3713 gen_vfp_add(dp);
3714 break;
3715 case 7: /* sub: fn - fm */
3716 gen_vfp_sub(dp);
3717 break;
3718 case 8: /* div: fn / fm */
3719 gen_vfp_div(dp);
3720 break;
da97f52c
PM
3721 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3722 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3723 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3724 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3725 /* These are fused multiply-add, and must be done as one
3726 * floating point operation with no rounding between the
3727 * multiplication and addition steps.
3728 * NB that doing the negations here as separate steps is
3729 * correct : an input NaN should come out with its sign bit
3730 * flipped if it is a negated-input.
3731 */
d614a513 3732 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3733 return 1;
3734 }
3735 if (dp) {
3736 TCGv_ptr fpst;
3737 TCGv_i64 frd;
3738 if (op & 1) {
3739 /* VFNMS, VFMS */
3740 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3741 }
3742 frd = tcg_temp_new_i64();
3743 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3744 if (op & 2) {
3745 /* VFNMA, VFNMS */
3746 gen_helper_vfp_negd(frd, frd);
3747 }
3748 fpst = get_fpstatus_ptr(0);
3749 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3750 cpu_F1d, frd, fpst);
3751 tcg_temp_free_ptr(fpst);
3752 tcg_temp_free_i64(frd);
3753 } else {
3754 TCGv_ptr fpst;
3755 TCGv_i32 frd;
3756 if (op & 1) {
3757 /* VFNMS, VFMS */
3758 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3759 }
3760 frd = tcg_temp_new_i32();
3761 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3762 if (op & 2) {
3763 gen_helper_vfp_negs(frd, frd);
3764 }
3765 fpst = get_fpstatus_ptr(0);
3766 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3767 cpu_F1s, frd, fpst);
3768 tcg_temp_free_ptr(fpst);
3769 tcg_temp_free_i32(frd);
3770 }
3771 break;
9ee6e8bb 3772 case 14: /* fconst */
d614a513
PM
3773 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3774 return 1;
3775 }
9ee6e8bb
PB
3776
3777 n = (insn << 12) & 0x80000000;
3778 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3779 if (dp) {
3780 if (i & 0x40)
3781 i |= 0x3f80;
3782 else
3783 i |= 0x4000;
3784 n |= i << 16;
4373f3ce 3785 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3786 } else {
3787 if (i & 0x40)
3788 i |= 0x780;
3789 else
3790 i |= 0x800;
3791 n |= i << 19;
5b340b51 3792 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3793 }
9ee6e8bb 3794 break;
b7bcbe95
FB
3795 case 15: /* extension space */
3796 switch (rn) {
3797 case 0: /* cpy */
3798 /* no-op */
3799 break;
3800 case 1: /* abs */
3801 gen_vfp_abs(dp);
3802 break;
3803 case 2: /* neg */
3804 gen_vfp_neg(dp);
3805 break;
3806 case 3: /* sqrt */
3807 gen_vfp_sqrt(dp);
3808 break;
239c20c7 3809 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3810 tmp = gen_vfp_mrs();
3811 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3812 if (dp) {
3813 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3814 cpu_env);
3815 } else {
3816 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3817 cpu_env);
3818 }
7d1b0095 3819 tcg_temp_free_i32(tmp);
60011498 3820 break;
239c20c7 3821 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3822 tmp = gen_vfp_mrs();
3823 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3824 if (dp) {
3825 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3826 cpu_env);
3827 } else {
3828 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3829 cpu_env);
3830 }
7d1b0095 3831 tcg_temp_free_i32(tmp);
60011498 3832 break;
239c20c7 3833 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3834 tmp = tcg_temp_new_i32();
239c20c7
WN
3835 if (dp) {
3836 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3837 cpu_env);
3838 } else {
3839 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3840 cpu_env);
3841 }
60011498
PB
3842 gen_mov_F0_vreg(0, rd);
3843 tmp2 = gen_vfp_mrs();
3844 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3845 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3846 tcg_temp_free_i32(tmp2);
60011498
PB
3847 gen_vfp_msr(tmp);
3848 break;
239c20c7 3849 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3850 tmp = tcg_temp_new_i32();
239c20c7
WN
3851 if (dp) {
3852 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3853 cpu_env);
3854 } else {
3855 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3856 cpu_env);
3857 }
60011498
PB
3858 tcg_gen_shli_i32(tmp, tmp, 16);
3859 gen_mov_F0_vreg(0, rd);
3860 tmp2 = gen_vfp_mrs();
3861 tcg_gen_ext16u_i32(tmp2, tmp2);
3862 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3863 tcg_temp_free_i32(tmp2);
60011498
PB
3864 gen_vfp_msr(tmp);
3865 break;
b7bcbe95
FB
3866 case 8: /* cmp */
3867 gen_vfp_cmp(dp);
3868 break;
3869 case 9: /* cmpe */
3870 gen_vfp_cmpe(dp);
3871 break;
3872 case 10: /* cmpz */
3873 gen_vfp_cmp(dp);
3874 break;
3875 case 11: /* cmpez */
3876 gen_vfp_F1_ld0(dp);
3877 gen_vfp_cmpe(dp);
3878 break;
664c6733
WN
3879 case 12: /* vrintr */
3880 {
3881 TCGv_ptr fpst = get_fpstatus_ptr(0);
3882 if (dp) {
3883 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3884 } else {
3885 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3886 }
3887 tcg_temp_free_ptr(fpst);
3888 break;
3889 }
a290c62a
WN
3890 case 13: /* vrintz */
3891 {
3892 TCGv_ptr fpst = get_fpstatus_ptr(0);
3893 TCGv_i32 tcg_rmode;
3894 tcg_rmode = tcg_const_i32(float_round_to_zero);
3895 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3896 if (dp) {
3897 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3898 } else {
3899 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3900 }
3901 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3902 tcg_temp_free_i32(tcg_rmode);
3903 tcg_temp_free_ptr(fpst);
3904 break;
3905 }
4e82bc01
WN
3906 case 14: /* vrintx */
3907 {
3908 TCGv_ptr fpst = get_fpstatus_ptr(0);
3909 if (dp) {
3910 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3911 } else {
3912 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3913 }
3914 tcg_temp_free_ptr(fpst);
3915 break;
3916 }
b7bcbe95
FB
3917 case 15: /* single<->double conversion */
3918 if (dp)
4373f3ce 3919 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3920 else
4373f3ce 3921 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3922 break;
3923 case 16: /* fuito */
5500b06c 3924 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3925 break;
3926 case 17: /* fsito */
5500b06c 3927 gen_vfp_sito(dp, 0);
b7bcbe95 3928 break;
9ee6e8bb 3929 case 20: /* fshto */
d614a513
PM
3930 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3931 return 1;
3932 }
5500b06c 3933 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3934 break;
3935 case 21: /* fslto */
d614a513
PM
3936 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3937 return 1;
3938 }
5500b06c 3939 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3940 break;
3941 case 22: /* fuhto */
d614a513
PM
3942 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3943 return 1;
3944 }
5500b06c 3945 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3946 break;
3947 case 23: /* fulto */
d614a513
PM
3948 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3949 return 1;
3950 }
5500b06c 3951 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3952 break;
b7bcbe95 3953 case 24: /* ftoui */
5500b06c 3954 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3955 break;
3956 case 25: /* ftouiz */
5500b06c 3957 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3958 break;
3959 case 26: /* ftosi */
5500b06c 3960 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3961 break;
3962 case 27: /* ftosiz */
5500b06c 3963 gen_vfp_tosiz(dp, 0);
b7bcbe95 3964 break;
9ee6e8bb 3965 case 28: /* ftosh */
d614a513
PM
3966 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3967 return 1;
3968 }
5500b06c 3969 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3970 break;
3971 case 29: /* ftosl */
d614a513
PM
3972 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3973 return 1;
3974 }
5500b06c 3975 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3976 break;
3977 case 30: /* ftouh */
d614a513
PM
3978 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3979 return 1;
3980 }
5500b06c 3981 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3982 break;
3983 case 31: /* ftoul */
d614a513
PM
3984 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3985 return 1;
3986 }
5500b06c 3987 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3988 break;
b7bcbe95 3989 default: /* undefined */
b7bcbe95
FB
3990 return 1;
3991 }
3992 break;
3993 default: /* undefined */
b7bcbe95
FB
3994 return 1;
3995 }
3996
3997 /* Write back the result. */
239c20c7
WN
3998 if (op == 15 && (rn >= 8 && rn <= 11)) {
3999 /* Comparison, do nothing. */
4000 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4001 (rn & 0x1e) == 0x6)) {
4002 /* VCVT double to int: always integer result.
4003 * VCVT double to half precision is always a single
4004 * precision result.
4005 */
b7bcbe95 4006 gen_mov_vreg_F0(0, rd);
239c20c7 4007 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4008 /* conversion */
4009 gen_mov_vreg_F0(!dp, rd);
239c20c7 4010 } else {
b7bcbe95 4011 gen_mov_vreg_F0(dp, rd);
239c20c7 4012 }
b7bcbe95
FB
4013
4014 /* break out of the loop if we have finished */
4015 if (veclen == 0)
4016 break;
4017
4018 if (op == 15 && delta_m == 0) {
4019 /* single source one-many */
4020 while (veclen--) {
4021 rd = ((rd + delta_d) & (bank_mask - 1))
4022 | (rd & bank_mask);
4023 gen_mov_vreg_F0(dp, rd);
4024 }
4025 break;
4026 }
4027 /* Setup the next operands. */
4028 veclen--;
4029 rd = ((rd + delta_d) & (bank_mask - 1))
4030 | (rd & bank_mask);
4031
4032 if (op == 15) {
4033 /* One source operand. */
4034 rm = ((rm + delta_m) & (bank_mask - 1))
4035 | (rm & bank_mask);
4036 gen_mov_F0_vreg(dp, rm);
4037 } else {
4038 /* Two source operands. */
4039 rn = ((rn + delta_d) & (bank_mask - 1))
4040 | (rn & bank_mask);
4041 gen_mov_F0_vreg(dp, rn);
4042 if (delta_m) {
4043 rm = ((rm + delta_m) & (bank_mask - 1))
4044 | (rm & bank_mask);
4045 gen_mov_F1_vreg(dp, rm);
4046 }
4047 }
4048 }
4049 }
4050 break;
4051 case 0xc:
4052 case 0xd:
8387da81 4053 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4054 /* two-register transfer */
4055 rn = (insn >> 16) & 0xf;
4056 rd = (insn >> 12) & 0xf;
4057 if (dp) {
9ee6e8bb
PB
4058 VFP_DREG_M(rm, insn);
4059 } else {
4060 rm = VFP_SREG_M(insn);
4061 }
b7bcbe95 4062
18c9b560 4063 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4064 /* vfp->arm */
4065 if (dp) {
4373f3ce
PB
4066 gen_mov_F0_vreg(0, rm * 2);
4067 tmp = gen_vfp_mrs();
4068 store_reg(s, rd, tmp);
4069 gen_mov_F0_vreg(0, rm * 2 + 1);
4070 tmp = gen_vfp_mrs();
4071 store_reg(s, rn, tmp);
b7bcbe95
FB
4072 } else {
4073 gen_mov_F0_vreg(0, rm);
4373f3ce 4074 tmp = gen_vfp_mrs();
8387da81 4075 store_reg(s, rd, tmp);
b7bcbe95 4076 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4077 tmp = gen_vfp_mrs();
8387da81 4078 store_reg(s, rn, tmp);
b7bcbe95
FB
4079 }
4080 } else {
4081 /* arm->vfp */
4082 if (dp) {
4373f3ce
PB
4083 tmp = load_reg(s, rd);
4084 gen_vfp_msr(tmp);
4085 gen_mov_vreg_F0(0, rm * 2);
4086 tmp = load_reg(s, rn);
4087 gen_vfp_msr(tmp);
4088 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4089 } else {
8387da81 4090 tmp = load_reg(s, rd);
4373f3ce 4091 gen_vfp_msr(tmp);
b7bcbe95 4092 gen_mov_vreg_F0(0, rm);
8387da81 4093 tmp = load_reg(s, rn);
4373f3ce 4094 gen_vfp_msr(tmp);
b7bcbe95
FB
4095 gen_mov_vreg_F0(0, rm + 1);
4096 }
4097 }
4098 } else {
4099 /* Load/store */
4100 rn = (insn >> 16) & 0xf;
4101 if (dp)
9ee6e8bb 4102 VFP_DREG_D(rd, insn);
b7bcbe95 4103 else
9ee6e8bb 4104 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4105 if ((insn & 0x01200000) == 0x01000000) {
4106 /* Single load/store */
4107 offset = (insn & 0xff) << 2;
4108 if ((insn & (1 << 23)) == 0)
4109 offset = -offset;
934814f1
PM
4110 if (s->thumb && rn == 15) {
4111 /* This is actually UNPREDICTABLE */
4112 addr = tcg_temp_new_i32();
4113 tcg_gen_movi_i32(addr, s->pc & ~2);
4114 } else {
4115 addr = load_reg(s, rn);
4116 }
312eea9f 4117 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4118 if (insn & (1 << 20)) {
312eea9f 4119 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4120 gen_mov_vreg_F0(dp, rd);
4121 } else {
4122 gen_mov_F0_vreg(dp, rd);
312eea9f 4123 gen_vfp_st(s, dp, addr);
b7bcbe95 4124 }
7d1b0095 4125 tcg_temp_free_i32(addr);
b7bcbe95
FB
4126 } else {
4127 /* load/store multiple */
934814f1 4128 int w = insn & (1 << 21);
b7bcbe95
FB
4129 if (dp)
4130 n = (insn >> 1) & 0x7f;
4131 else
4132 n = insn & 0xff;
4133
934814f1
PM
4134 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4135 /* P == U , W == 1 => UNDEF */
4136 return 1;
4137 }
4138 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4139 /* UNPREDICTABLE cases for bad immediates: we choose to
4140 * UNDEF to avoid generating huge numbers of TCG ops
4141 */
4142 return 1;
4143 }
4144 if (rn == 15 && w) {
4145 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4146 return 1;
4147 }
4148
4149 if (s->thumb && rn == 15) {
4150 /* This is actually UNPREDICTABLE */
4151 addr = tcg_temp_new_i32();
4152 tcg_gen_movi_i32(addr, s->pc & ~2);
4153 } else {
4154 addr = load_reg(s, rn);
4155 }
b7bcbe95 4156 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4157 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4158
4159 if (dp)
4160 offset = 8;
4161 else
4162 offset = 4;
4163 for (i = 0; i < n; i++) {
18c9b560 4164 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4165 /* load */
312eea9f 4166 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4167 gen_mov_vreg_F0(dp, rd + i);
4168 } else {
4169 /* store */
4170 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4171 gen_vfp_st(s, dp, addr);
b7bcbe95 4172 }
312eea9f 4173 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4174 }
934814f1 4175 if (w) {
b7bcbe95
FB
4176 /* writeback */
4177 if (insn & (1 << 24))
4178 offset = -offset * n;
4179 else if (dp && (insn & 1))
4180 offset = 4;
4181 else
4182 offset = 0;
4183
4184 if (offset != 0)
312eea9f
FN
4185 tcg_gen_addi_i32(addr, addr, offset);
4186 store_reg(s, rn, addr);
4187 } else {
7d1b0095 4188 tcg_temp_free_i32(addr);
b7bcbe95
FB
4189 }
4190 }
4191 }
4192 break;
4193 default:
4194 /* Should never happen. */
4195 return 1;
4196 }
4197 return 0;
4198}
4199
/* Return true if we may emit a direct (chained) TB-to-TB jump to 'dest'.
 * For system emulation this is only allowed when 'dest' lies on the same
 * guest page as either the start of the current TB or the instruction we
 * just translated, so a direct chain never crosses a page boundary.
 * NOTE(review): 's->pc - 1' presumably anchors the check inside the byte
 * range of the current instruction -- confirm against the callers.
 * For user-mode emulation there is no such restriction.
 */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
6e256c93 4209
8a6b28c7
EC
/* Emit a dynamic jump: look up a TB matching the current CPU state and
 * jump straight to it, or fall back to the main loop if none exists.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4214
4cae8f56
AB
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: reserve jump slot 'n', set the PC (used until
         * the slot is patched to point at the destination TB), and exit
         * identifying this TB + slot so the main loop can link them.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Cross-page or otherwise unchainable: indirect jump via lookup. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4231
8aaca4c0
FB
4232static inline void gen_jmp (DisasContext *s, uint32_t dest)
4233{
b636649f 4234 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4235 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4236 if (s->thumb)
d9ba4830
PB
4237 dest |= 1;
4238 gen_bx_im(s, dest);
8aaca4c0 4239 } else {
6e256c93 4240 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4241 }
4242}
4243
39d5492a 4244static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4245{
ee097184 4246 if (x)
d9ba4830 4247 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4248 else
d9ba4830 4249 gen_sxth(t0);
ee097184 4250 if (y)
d9ba4830 4251 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4252 else
d9ba4830
PB
4253 gen_sxth(t1);
4254 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4255}
4256
4257/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4258static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4259{
b5ff1b31
FB
4260 uint32_t mask;
4261
4262 mask = 0;
4263 if (flags & (1 << 0))
4264 mask |= 0xff;
4265 if (flags & (1 << 1))
4266 mask |= 0xff00;
4267 if (flags & (1 << 2))
4268 mask |= 0xff0000;
4269 if (flags & (1 << 3))
4270 mask |= 0xff000000;
9ee6e8bb 4271
2ae23e75 4272 /* Mask out undefined bits. */
9ee6e8bb 4273 mask &= ~CPSR_RESERVED;
d614a513 4274 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4275 mask &= ~CPSR_T;
d614a513
PM
4276 }
4277 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4278 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4279 }
4280 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4281 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4282 }
4283 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4284 mask &= ~CPSR_IT;
d614a513 4285 }
4051e12c
PM
4286 /* Mask out execution state and reserved bits. */
4287 if (!spsr) {
4288 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4289 }
b5ff1b31
FB
4290 /* Mask out privileged bits. */
4291 if (IS_USER(s))
9ee6e8bb 4292 mask &= CPSR_USER;
b5ff1b31
FB
4293 return mask;
4294}
4295
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s))
            return 1;

        /* NOTE(review): load_cpu_field()/store_cpu_field() take a CPU state
         * *field name*; 'spsr' here names the env field, which happens to
         * spell the same as the int parameter -- TODO confirm.
         */
        tmp = load_cpu_field(spsr);
        /* Read-modify-write: keep bits outside 'mask', insert t0's bits. */
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR changes can affect how subsequent code is translated. */
    gen_lookup_tb(s);
    return 0;
}
4317
2fbac54b
FN
4318/* Returns nonzero if access to the PSR is not permitted. */
4319static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4320{
39d5492a 4321 TCGv_i32 tmp;
7d1b0095 4322 tmp = tcg_temp_new_i32();
2fbac54b
FN
4323 tcg_gen_movi_i32(tmp, val);
4324 return gen_set_psr(s, mask, spsr, tmp);
4325}
4326
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4471
4472static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4473{
4474 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4475 int tgtmode = 0, regno = 0;
4476
4477 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4478 return;
4479 }
4480
4481 /* Sync state because msr_banked() can raise exceptions */
4482 gen_set_condexec(s);
4483 gen_set_pc_im(s, s->pc - 4);
4484 tcg_reg = load_reg(s, rn);
4485 tcg_tgtmode = tcg_const_i32(tgtmode);
4486 tcg_regno = tcg_const_i32(regno);
4487 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4488 tcg_temp_free_i32(tcg_tgtmode);
4489 tcg_temp_free_i32(tcg_regno);
4490 tcg_temp_free_i32(tcg_reg);
dcba3a8d 4491 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4492}
4493
4494static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4495{
4496 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4497 int tgtmode = 0, regno = 0;
4498
4499 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4500 return;
4501 }
4502
4503 /* Sync state because mrs_banked() can raise exceptions */
4504 gen_set_condexec(s);
4505 gen_set_pc_im(s, s->pc - 4);
4506 tcg_reg = tcg_temp_new_i32();
4507 tcg_tgtmode = tcg_const_i32(tgtmode);
4508 tcg_regno = tcg_const_i32(regno);
4509 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4510 tcg_temp_free_i32(tcg_tgtmode);
4511 tcg_temp_free_i32(tcg_regno);
4512 store_reg(s, rn, tcg_reg);
dcba3a8d 4513 s->base.is_jmp = DISAS_UPDATE;
8bfd0550
PM
4514}
4515
fb0e8e79
PM
4516/* Store value to PC as for an exception return (ie don't
4517 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
4518 * will do the masking based on the new value of the Thumb bit.
4519 */
4520static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4521{
fb0e8e79
PM
4522 tcg_gen_mov_i32(cpu_R[15], pc);
4523 tcg_temp_free_i32(pc);
b5ff1b31
FB
4524}
4525
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    /* store_pc_exc_ret() frees pc. */
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4539
fb0e8e79
PM
4540/* Generate an old-style exception return. Marks pc as dead. */
4541static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
4542{
4543 gen_rfe(s, pc, load_cpu_field(spsr));
4544}
4545
c22edfeb
AB
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
        /* fall through: treated as a nop for now */
    default: /* nop */
        break;
    }
}
99c475ab 4585
ad69471c 4586#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4587
39d5492a 4588static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4589{
4590 switch (size) {
dd8fbd78
FN
4591 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4592 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4593 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4594 default: abort();
9ee6e8bb 4595 }
9ee6e8bb
PB
4596}
4597
39d5492a 4598static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4599{
4600 switch (size) {
dd8fbd78
FN
4601 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4602 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4603 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4604 default: return;
4605 }
4606}
4607
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Invoke gen_helper_neon_<name>_{s,u}{8,16,32}, selected by the
 * enclosing scope's 'size' (0/1/2) and 'u' (unsigned flag) variables,
 * passing cpu_env and operating in-place on tmp/tmp2. Any other
 * (size, u) combination makes the enclosing function 'return 1'
 * (i.e. UNDEF the instruction).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4659
39d5492a 4660static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4661{
39d5492a 4662 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4663 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4664 return tmp;
9ee6e8bb
PB
4665}
4666
39d5492a 4667static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4668{
dd8fbd78 4669 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4670 tcg_temp_free_i32(var);
9ee6e8bb
PB
4671}
4672
39d5492a 4673static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4674{
39d5492a 4675 TCGv_i32 tmp;
9ee6e8bb 4676 if (size == 1) {
0fad6efc
PM
4677 tmp = neon_load_reg(reg & 7, reg >> 4);
4678 if (reg & 8) {
dd8fbd78 4679 gen_neon_dup_high16(tmp);
0fad6efc
PM
4680 } else {
4681 gen_neon_dup_low16(tmp);
dd8fbd78 4682 }
0fad6efc
PM
4683 } else {
4684 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4685 }
dd8fbd78 4686 return tmp;
9ee6e8bb
PB
4687}
4688
/* Emit code for a Neon "unzip" of registers rd/rm with element size
 * 'size' (log2 bytes) on a quad (q != 0) or double register.
 * Returns nonzero if the form is invalid (UNDEF): the only rejected
 * combination here is a double-register op with 32-bit elements.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
4728
/* Emit code for a Neon "zip" (interleave) of registers rd/rm with
 * element size 'size' (log2 bytes) on a quad (q != 0) or double
 * register. Returns nonzero if the form is invalid (UNDEF): the only
 * rejected combination here is a double-register op with 32-bit
 * elements. Mirrors gen_neon_unzip().
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
4768
39d5492a 4769static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4770{
39d5492a 4771 TCGv_i32 rd, tmp;
19457615 4772
7d1b0095
PM
4773 rd = tcg_temp_new_i32();
4774 tmp = tcg_temp_new_i32();
19457615
FN
4775
4776 tcg_gen_shli_i32(rd, t0, 8);
4777 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4778 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4779 tcg_gen_or_i32(rd, rd, tmp);
4780
4781 tcg_gen_shri_i32(t1, t1, 8);
4782 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4783 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4784 tcg_gen_or_i32(t1, t1, tmp);
4785 tcg_gen_mov_i32(t0, rd);
4786
7d1b0095
PM
4787 tcg_temp_free_i32(tmp);
4788 tcg_temp_free_i32(rd);
19457615
FN
4789}
4790
39d5492a 4791static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4792{
39d5492a 4793 TCGv_i32 rd, tmp;
19457615 4794
7d1b0095
PM
4795 rd = tcg_temp_new_i32();
4796 tmp = tcg_temp_new_i32();
19457615
FN
4797
4798 tcg_gen_shli_i32(rd, t0, 16);
4799 tcg_gen_andi_i32(tmp, t1, 0xffff);
4800 tcg_gen_or_i32(rd, rd, tmp);
4801 tcg_gen_shri_i32(t1, t1, 16);
4802 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4803 tcg_gen_or_i32(t1, t1, tmp);
4804 tcg_gen_mov_i32(t0, rd);
4805
7d1b0095
PM
4806 tcg_temp_free_i32(tmp);
4807 tcg_temp_free_i32(rd);
19457615
FN
4808}
4809
4810
9ee6e8bb
PB
4811static struct {
4812 int nregs;
4813 int interleave;
4814 int spacing;
4815} neon_ls_element_type[11] = {
4816 {4, 4, 1},
4817 {4, 4, 2},
4818 {4, 1, 1},
4819 {4, 2, 1},
4820 {3, 3, 1},
4821 {3, 3, 2},
4822 {3, 1, 1},
4823 {1, 1, 1},
4824 {2, 2, 1},
4825 {2, 2, 2},
4826 {2, 1, 1}
4827};
4828
4829/* Translate a NEON load/store element instruction. Return nonzero if the
4830 instruction is invalid. */
7dcc1f89 4831static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4832{
4833 int rd, rn, rm;
4834 int op;
4835 int nregs;
4836 int interleave;
84496233 4837 int spacing;
9ee6e8bb
PB
4838 int stride;
4839 int size;
4840 int reg;
4841 int pass;
4842 int load;
4843 int shift;
9ee6e8bb 4844 int n;
39d5492a
PM
4845 TCGv_i32 addr;
4846 TCGv_i32 tmp;
4847 TCGv_i32 tmp2;
84496233 4848 TCGv_i64 tmp64;
9ee6e8bb 4849
2c7ffc41
PM
4850 /* FIXME: this access check should not take precedence over UNDEF
4851 * for invalid encodings; we will generate incorrect syndrome information
4852 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4853 */
9dbbc748 4854 if (s->fp_excp_el) {
2c7ffc41 4855 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4856 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4857 return 0;
4858 }
4859
5df8bac1 4860 if (!s->vfp_enabled)
9ee6e8bb
PB
4861 return 1;
4862 VFP_DREG_D(rd, insn);
4863 rn = (insn >> 16) & 0xf;
4864 rm = insn & 0xf;
4865 load = (insn & (1 << 21)) != 0;
4866 if ((insn & (1 << 23)) == 0) {
4867 /* Load store all elements. */
4868 op = (insn >> 8) & 0xf;
4869 size = (insn >> 6) & 3;
84496233 4870 if (op > 10)
9ee6e8bb 4871 return 1;
f2dd89d0
PM
4872 /* Catch UNDEF cases for bad values of align field */
4873 switch (op & 0xc) {
4874 case 4:
4875 if (((insn >> 5) & 1) == 1) {
4876 return 1;
4877 }
4878 break;
4879 case 8:
4880 if (((insn >> 4) & 3) == 3) {
4881 return 1;
4882 }
4883 break;
4884 default:
4885 break;
4886 }
9ee6e8bb
PB
4887 nregs = neon_ls_element_type[op].nregs;
4888 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4889 spacing = neon_ls_element_type[op].spacing;
4890 if (size == 3 && (interleave | spacing) != 1)
4891 return 1;
e318a60b 4892 addr = tcg_temp_new_i32();
dcc65026 4893 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4894 stride = (1 << size) * interleave;
4895 for (reg = 0; reg < nregs; reg++) {
4896 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4897 load_reg_var(s, addr, rn);
4898 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4899 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4900 load_reg_var(s, addr, rn);
4901 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4902 }
84496233 4903 if (size == 3) {
8ed1237d 4904 tmp64 = tcg_temp_new_i64();
84496233 4905 if (load) {
12dcc321 4906 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4907 neon_store_reg64(tmp64, rd);
84496233 4908 } else {
84496233 4909 neon_load_reg64(tmp64, rd);
12dcc321 4910 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4911 }
8ed1237d 4912 tcg_temp_free_i64(tmp64);
84496233
JR
4913 tcg_gen_addi_i32(addr, addr, stride);
4914 } else {
4915 for (pass = 0; pass < 2; pass++) {
4916 if (size == 2) {
4917 if (load) {
58ab8e96 4918 tmp = tcg_temp_new_i32();
12dcc321 4919 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4920 neon_store_reg(rd, pass, tmp);
4921 } else {
4922 tmp = neon_load_reg(rd, pass);
12dcc321 4923 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4924 tcg_temp_free_i32(tmp);
84496233 4925 }
1b2b1e54 4926 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4927 } else if (size == 1) {
4928 if (load) {
58ab8e96 4929 tmp = tcg_temp_new_i32();
12dcc321 4930 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4931 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4932 tmp2 = tcg_temp_new_i32();
12dcc321 4933 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4934 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4935 tcg_gen_shli_i32(tmp2, tmp2, 16);
4936 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4937 tcg_temp_free_i32(tmp2);
84496233
JR
4938 neon_store_reg(rd, pass, tmp);
4939 } else {
4940 tmp = neon_load_reg(rd, pass);
7d1b0095 4941 tmp2 = tcg_temp_new_i32();
84496233 4942 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 4943 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 4944 tcg_temp_free_i32(tmp);
84496233 4945 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 4946 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 4947 tcg_temp_free_i32(tmp2);
1b2b1e54 4948 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4949 }
84496233
JR
4950 } else /* size == 0 */ {
4951 if (load) {
f764718d 4952 tmp2 = NULL;
84496233 4953 for (n = 0; n < 4; n++) {
58ab8e96 4954 tmp = tcg_temp_new_i32();
12dcc321 4955 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
4956 tcg_gen_addi_i32(addr, addr, stride);
4957 if (n == 0) {
4958 tmp2 = tmp;
4959 } else {
41ba8341
PB
4960 tcg_gen_shli_i32(tmp, tmp, n * 8);
4961 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4962 tcg_temp_free_i32(tmp);
84496233 4963 }
9ee6e8bb 4964 }
84496233
JR
4965 neon_store_reg(rd, pass, tmp2);
4966 } else {
4967 tmp2 = neon_load_reg(rd, pass);
4968 for (n = 0; n < 4; n++) {
7d1b0095 4969 tmp = tcg_temp_new_i32();
84496233
JR
4970 if (n == 0) {
4971 tcg_gen_mov_i32(tmp, tmp2);
4972 } else {
4973 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4974 }
12dcc321 4975 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 4976 tcg_temp_free_i32(tmp);
84496233
JR
4977 tcg_gen_addi_i32(addr, addr, stride);
4978 }
7d1b0095 4979 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4980 }
4981 }
4982 }
4983 }
84496233 4984 rd += spacing;
9ee6e8bb 4985 }
e318a60b 4986 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4987 stride = nregs * 8;
4988 } else {
4989 size = (insn >> 10) & 3;
4990 if (size == 3) {
4991 /* Load single element to all lanes. */
8e18cde3
PM
4992 int a = (insn >> 4) & 1;
4993 if (!load) {
9ee6e8bb 4994 return 1;
8e18cde3 4995 }
9ee6e8bb
PB
4996 size = (insn >> 6) & 3;
4997 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4998
4999 if (size == 3) {
5000 if (nregs != 4 || a == 0) {
9ee6e8bb 5001 return 1;
99c475ab 5002 }
8e18cde3
PM
5003 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5004 size = 2;
5005 }
5006 if (nregs == 1 && a == 1 && size == 0) {
5007 return 1;
5008 }
5009 if (nregs == 3 && a == 1) {
5010 return 1;
5011 }
e318a60b 5012 addr = tcg_temp_new_i32();
8e18cde3
PM
5013 load_reg_var(s, addr, rn);
5014 if (nregs == 1) {
5015 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5016 tmp = gen_load_and_replicate(s, addr, size);
5017 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5018 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5019 if (insn & (1 << 5)) {
5020 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5021 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5022 }
5023 tcg_temp_free_i32(tmp);
5024 } else {
5025 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5026 stride = (insn & (1 << 5)) ? 2 : 1;
5027 for (reg = 0; reg < nregs; reg++) {
5028 tmp = gen_load_and_replicate(s, addr, size);
5029 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5030 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5031 tcg_temp_free_i32(tmp);
5032 tcg_gen_addi_i32(addr, addr, 1 << size);
5033 rd += stride;
5034 }
9ee6e8bb 5035 }
e318a60b 5036 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5037 stride = (1 << size) * nregs;
5038 } else {
5039 /* Single element. */
93262b16 5040 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5041 pass = (insn >> 7) & 1;
5042 switch (size) {
5043 case 0:
5044 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5045 stride = 1;
5046 break;
5047 case 1:
5048 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5049 stride = (insn & (1 << 5)) ? 2 : 1;
5050 break;
5051 case 2:
5052 shift = 0;
9ee6e8bb
PB
5053 stride = (insn & (1 << 6)) ? 2 : 1;
5054 break;
5055 default:
5056 abort();
5057 }
5058 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5059 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5060 switch (nregs) {
5061 case 1:
5062 if (((idx & (1 << size)) != 0) ||
5063 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5064 return 1;
5065 }
5066 break;
5067 case 3:
5068 if ((idx & 1) != 0) {
5069 return 1;
5070 }
5071 /* fall through */
5072 case 2:
5073 if (size == 2 && (idx & 2) != 0) {
5074 return 1;
5075 }
5076 break;
5077 case 4:
5078 if ((size == 2) && ((idx & 3) == 3)) {
5079 return 1;
5080 }
5081 break;
5082 default:
5083 abort();
5084 }
5085 if ((rd + stride * (nregs - 1)) > 31) {
5086 /* Attempts to write off the end of the register file
5087 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5088 * the neon_load_reg() would write off the end of the array.
5089 */
5090 return 1;
5091 }
e318a60b 5092 addr = tcg_temp_new_i32();
dcc65026 5093 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5094 for (reg = 0; reg < nregs; reg++) {
5095 if (load) {
58ab8e96 5096 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5097 switch (size) {
5098 case 0:
12dcc321 5099 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5100 break;
5101 case 1:
12dcc321 5102 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5103 break;
5104 case 2:
12dcc321 5105 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5106 break;
a50f5b91
PB
5107 default: /* Avoid compiler warnings. */
5108 abort();
9ee6e8bb
PB
5109 }
5110 if (size != 2) {
8f8e3aa4 5111 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5112 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5113 shift, size ? 16 : 8);
7d1b0095 5114 tcg_temp_free_i32(tmp2);
9ee6e8bb 5115 }
8f8e3aa4 5116 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5117 } else { /* Store */
8f8e3aa4
PB
5118 tmp = neon_load_reg(rd, pass);
5119 if (shift)
5120 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5121 switch (size) {
5122 case 0:
12dcc321 5123 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5124 break;
5125 case 1:
12dcc321 5126 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5127 break;
5128 case 2:
12dcc321 5129 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5130 break;
99c475ab 5131 }
58ab8e96 5132 tcg_temp_free_i32(tmp);
99c475ab 5133 }
9ee6e8bb 5134 rd += stride;
1b2b1e54 5135 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5136 }
e318a60b 5137 tcg_temp_free_i32(addr);
9ee6e8bb 5138 stride = nregs * (1 << size);
99c475ab 5139 }
9ee6e8bb
PB
5140 }
5141 if (rm != 15) {
39d5492a 5142 TCGv_i32 base;
b26eefb6
PB
5143
5144 base = load_reg(s, rn);
9ee6e8bb 5145 if (rm == 13) {
b26eefb6 5146 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5147 } else {
39d5492a 5148 TCGv_i32 index;
b26eefb6
PB
5149 index = load_reg(s, rm);
5150 tcg_gen_add_i32(base, base, index);
7d1b0095 5151 tcg_temp_free_i32(index);
9ee6e8bb 5152 }
b26eefb6 5153 store_reg(s, rn, base);
9ee6e8bb
PB
5154 }
5155 return 0;
5156}
3b46e624 5157
8f8e3aa4 5158/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5159static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5160{
5161 tcg_gen_and_i32(t, t, c);
f669df27 5162 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5163 tcg_gen_or_i32(dest, t, f);
5164}
5165
39d5492a 5166static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5167{
5168 switch (size) {
5169 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5170 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5171 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5172 default: abort();
5173 }
5174}
5175
39d5492a 5176static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5177{
5178 switch (size) {
02da0b2d
PM
5179 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5180 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5181 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5182 default: abort();
5183 }
5184}
5185
39d5492a 5186static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5187{
5188 switch (size) {
02da0b2d
PM
5189 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5190 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5191 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5192 default: abort();
5193 }
5194}
5195
39d5492a 5196static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5197{
5198 switch (size) {
02da0b2d
PM
5199 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5200 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5201 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5202 default: abort();
5203 }
5204}
5205
/* Apply a Neon element shift to var, with the per-element shift amounts
 * in shift.  q selects the rounding ("rshl") helper variants and u the
 * unsigned ones.  Only 16-bit (size 1) and 32-bit (size 2) elements are
 * handled here; any other size aborts.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        /* Rounding variants. */
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        /* Non-rounding (truncating) variants. */
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
5239
39d5492a 5240static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5241{
5242 if (u) {
5243 switch (size) {
5244 case 0: gen_helper_neon_widen_u8(dest, src); break;
5245 case 1: gen_helper_neon_widen_u16(dest, src); break;
5246 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5247 default: abort();
5248 }
5249 } else {
5250 switch (size) {
5251 case 0: gen_helper_neon_widen_s8(dest, src); break;
5252 case 1: gen_helper_neon_widen_s16(dest, src); break;
5253 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5254 default: abort();
5255 }
5256 }
7d1b0095 5257 tcg_temp_free_i32(src);
ad69471c
PB
5258}
5259
5260static inline void gen_neon_addl(int size)
5261{
5262 switch (size) {
5263 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5264 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5265 case 2: tcg_gen_add_i64(CPU_V001); break;
5266 default: abort();
5267 }
5268}
5269
5270static inline void gen_neon_subl(int size)
5271{
5272 switch (size) {
5273 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5274 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5275 case 2: tcg_gen_sub_i64(CPU_V001); break;
5276 default: abort();
5277 }
5278}
5279
a7812ae4 5280static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5281{
5282 switch (size) {
5283 case 0: gen_helper_neon_negl_u16(var, var); break;
5284 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5285 case 2:
5286 tcg_gen_neg_i64(var, var);
5287 break;
ad69471c
PB
5288 default: abort();
5289 }
5290}
5291
a7812ae4 5292static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5293{
5294 switch (size) {
02da0b2d
PM
5295 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5296 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5297 default: abort();
5298 }
5299}
5300
/* dest = a * b, widening the packed elements of the 32-bit inputs to a
 * 64-bit result.  (size << 1) | u selects s8/u8/s16/u16 multiply helpers;
 * size == 2 is a full 32x32->64 multiply done with inline TCG ops.
 * NOTE(review): gen_mul[su]_i64_i32 appear to consume their a/b operands,
 * which is why only the size < 2 paths free them below — confirm.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* Signed 32x32->64. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        /* Unsigned 32x32->64. */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5331
39d5492a
PM
5332static void gen_neon_narrow_op(int op, int u, int size,
5333 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5334{
5335 if (op) {
5336 if (u) {
5337 gen_neon_unarrow_sats(size, dest, src);
5338 } else {
5339 gen_neon_narrow(size, dest, src);
5340 }
5341 } else {
5342 if (u) {
5343 gen_neon_narrow_satu(size, dest, src);
5344 } else {
5345 gen_neon_narrow_sats(size, dest, src);
5346 }
5347 }
5348}
5349
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
/* For each 3-reg-same op above, bit n of the entry is set if size value n
 * is a legal encoding; a clear bit means the insn UNDEFs for that size.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5421
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Note that values 3, 29, and several others are
 * deliberately absent (unallocated encodings).
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5488
5489static int neon_2rm_is_float_op(int op)
5490{
5491 /* Return true if this neon 2reg-misc op is float-to-float */
5492 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5493 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5494 op == NEON_2RM_VRINTM ||
5495 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5496 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5497}
5498
/* Return true if this neon 2reg-misc op is ARMv8 and up.
 * These are the VRINT* round-to-integral ops and the VCVT[ANPM][US]
 * directed-rounding conversions.  The case list is deliberately sparse:
 * ops 44 and 46 (VCVT_F16_F32 / VCVT_F32_F16) fall in the same numeric
 * range but predate v8 and must not match.
 */
static bool neon_2rm_is_v8_op(int op)
{
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
5522
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5591
9ee6e8bb
PB
5592/* Translate a NEON data processing instruction. Return nonzero if the
5593 instruction is invalid.
ad69471c
PB
5594 We process data in a mixture of 32-bit and 64-bit chunks.
5595 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5596
7dcc1f89 5597static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5598{
5599 int op;
5600 int q;
5601 int rd, rn, rm;
5602 int size;
5603 int shift;
5604 int pass;
5605 int count;
5606 int pairwise;
5607 int u;
ca9a32e4 5608 uint32_t imm, mask;
39d5492a 5609 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5610 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5611 TCGv_i64 tmp64;
9ee6e8bb 5612
2c7ffc41
PM
5613 /* FIXME: this access check should not take precedence over UNDEF
5614 * for invalid encodings; we will generate incorrect syndrome information
5615 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5616 */
9dbbc748 5617 if (s->fp_excp_el) {
2c7ffc41 5618 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5619 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5620 return 0;
5621 }
5622
5df8bac1 5623 if (!s->vfp_enabled)
9ee6e8bb
PB
5624 return 1;
5625 q = (insn & (1 << 6)) != 0;
5626 u = (insn >> 24) & 1;
5627 VFP_DREG_D(rd, insn);
5628 VFP_DREG_N(rn, insn);
5629 VFP_DREG_M(rm, insn);
5630 size = (insn >> 20) & 3;
5631 if ((insn & (1 << 23)) == 0) {
5632 /* Three register same length. */
5633 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5634 /* Catch invalid op and bad size combinations: UNDEF */
5635 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5636 return 1;
5637 }
25f84f79
PM
5638 /* All insns of this form UNDEF for either this condition or the
5639 * superset of cases "Q==1"; we catch the latter later.
5640 */
5641 if (q && ((rd | rn | rm) & 1)) {
5642 return 1;
5643 }
f1ecb913
AB
5644 /*
5645 * The SHA-1/SHA-256 3-register instructions require special treatment
5646 * here, as their size field is overloaded as an op type selector, and
5647 * they all consume their input in a single pass.
5648 */
5649 if (op == NEON_3R_SHA) {
5650 if (!q) {
5651 return 1;
5652 }
5653 if (!u) { /* SHA-1 */
d614a513 5654 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5655 return 1;
5656 }
1a66ac61
RH
5657 ptr1 = vfp_reg_ptr(true, rd);
5658 ptr2 = vfp_reg_ptr(true, rn);
5659 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5660 tmp4 = tcg_const_i32(size);
1a66ac61 5661 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5662 tcg_temp_free_i32(tmp4);
5663 } else { /* SHA-256 */
d614a513 5664 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5665 return 1;
5666 }
1a66ac61
RH
5667 ptr1 = vfp_reg_ptr(true, rd);
5668 ptr2 = vfp_reg_ptr(true, rn);
5669 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5670 switch (size) {
5671 case 0:
1a66ac61 5672 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5673 break;
5674 case 1:
1a66ac61 5675 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5676 break;
5677 case 2:
1a66ac61 5678 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5679 break;
5680 }
5681 }
1a66ac61
RH
5682 tcg_temp_free_ptr(ptr1);
5683 tcg_temp_free_ptr(ptr2);
5684 tcg_temp_free_ptr(ptr3);
f1ecb913
AB
5685 return 0;
5686 }
62698be3
PM
5687 if (size == 3 && op != NEON_3R_LOGIC) {
5688 /* 64-bit element instructions. */
9ee6e8bb 5689 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5690 neon_load_reg64(cpu_V0, rn + pass);
5691 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5692 switch (op) {
62698be3 5693 case NEON_3R_VQADD:
9ee6e8bb 5694 if (u) {
02da0b2d
PM
5695 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5696 cpu_V0, cpu_V1);
2c0262af 5697 } else {
02da0b2d
PM
5698 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5699 cpu_V0, cpu_V1);
2c0262af 5700 }
9ee6e8bb 5701 break;
62698be3 5702 case NEON_3R_VQSUB:
9ee6e8bb 5703 if (u) {
02da0b2d
PM
5704 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5705 cpu_V0, cpu_V1);
ad69471c 5706 } else {
02da0b2d
PM
5707 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5708 cpu_V0, cpu_V1);
ad69471c
PB
5709 }
5710 break;
62698be3 5711 case NEON_3R_VSHL:
ad69471c
PB
5712 if (u) {
5713 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5714 } else {
5715 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5716 }
5717 break;
62698be3 5718 case NEON_3R_VQSHL:
ad69471c 5719 if (u) {
02da0b2d
PM
5720 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5721 cpu_V1, cpu_V0);
ad69471c 5722 } else {
02da0b2d
PM
5723 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5724 cpu_V1, cpu_V0);
ad69471c
PB
5725 }
5726 break;
62698be3 5727 case NEON_3R_VRSHL:
ad69471c
PB
5728 if (u) {
5729 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5730 } else {
ad69471c
PB
5731 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5732 }
5733 break;
62698be3 5734 case NEON_3R_VQRSHL:
ad69471c 5735 if (u) {
02da0b2d
PM
5736 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5737 cpu_V1, cpu_V0);
ad69471c 5738 } else {
02da0b2d
PM
5739 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5740 cpu_V1, cpu_V0);
1e8d4eec 5741 }
9ee6e8bb 5742 break;
62698be3 5743 case NEON_3R_VADD_VSUB:
9ee6e8bb 5744 if (u) {
ad69471c 5745 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5746 } else {
ad69471c 5747 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5748 }
5749 break;
5750 default:
5751 abort();
2c0262af 5752 }
ad69471c 5753 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5754 }
9ee6e8bb 5755 return 0;
2c0262af 5756 }
25f84f79 5757 pairwise = 0;
9ee6e8bb 5758 switch (op) {
62698be3
PM
5759 case NEON_3R_VSHL:
5760 case NEON_3R_VQSHL:
5761 case NEON_3R_VRSHL:
5762 case NEON_3R_VQRSHL:
9ee6e8bb 5763 {
ad69471c
PB
5764 int rtmp;
5765 /* Shift instruction operands are reversed. */
5766 rtmp = rn;
9ee6e8bb 5767 rn = rm;
ad69471c 5768 rm = rtmp;
9ee6e8bb 5769 }
2c0262af 5770 break;
25f84f79
PM
5771 case NEON_3R_VPADD:
5772 if (u) {
5773 return 1;
5774 }
5775 /* Fall through */
62698be3
PM
5776 case NEON_3R_VPMAX:
5777 case NEON_3R_VPMIN:
9ee6e8bb 5778 pairwise = 1;
2c0262af 5779 break;
25f84f79
PM
5780 case NEON_3R_FLOAT_ARITH:
5781 pairwise = (u && size < 2); /* if VPADD (float) */
5782 break;
5783 case NEON_3R_FLOAT_MINMAX:
5784 pairwise = u; /* if VPMIN/VPMAX (float) */
5785 break;
5786 case NEON_3R_FLOAT_CMP:
5787 if (!u && size) {
5788 /* no encoding for U=0 C=1x */
5789 return 1;
5790 }
5791 break;
5792 case NEON_3R_FLOAT_ACMP:
5793 if (!u) {
5794 return 1;
5795 }
5796 break;
505935fc
WN
5797 case NEON_3R_FLOAT_MISC:
5798 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5799 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5800 return 1;
5801 }
2c0262af 5802 break;
25f84f79
PM
5803 case NEON_3R_VMUL:
5804 if (u && (size != 0)) {
5805 /* UNDEF on invalid size for polynomial subcase */
5806 return 1;
5807 }
2c0262af 5808 break;
da97f52c 5809 case NEON_3R_VFM:
d614a513 5810 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5811 return 1;
5812 }
5813 break;
9ee6e8bb 5814 default:
2c0262af 5815 break;
9ee6e8bb 5816 }
dd8fbd78 5817
25f84f79
PM
5818 if (pairwise && q) {
5819 /* All the pairwise insns UNDEF if Q is set */
5820 return 1;
5821 }
5822
9ee6e8bb
PB
5823 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5824
5825 if (pairwise) {
5826 /* Pairwise. */
a5a14945
JR
5827 if (pass < 1) {
5828 tmp = neon_load_reg(rn, 0);
5829 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5830 } else {
a5a14945
JR
5831 tmp = neon_load_reg(rm, 0);
5832 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5833 }
5834 } else {
5835 /* Elementwise. */
dd8fbd78
FN
5836 tmp = neon_load_reg(rn, pass);
5837 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5838 }
5839 switch (op) {
62698be3 5840 case NEON_3R_VHADD:
9ee6e8bb
PB
5841 GEN_NEON_INTEGER_OP(hadd);
5842 break;
62698be3 5843 case NEON_3R_VQADD:
02da0b2d 5844 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5845 break;
62698be3 5846 case NEON_3R_VRHADD:
9ee6e8bb 5847 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5848 break;
62698be3 5849 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5850 switch ((u << 2) | size) {
5851 case 0: /* VAND */
dd8fbd78 5852 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5853 break;
5854 case 1: /* BIC */
f669df27 5855 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5856 break;
5857 case 2: /* VORR */
dd8fbd78 5858 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5859 break;
5860 case 3: /* VORN */
f669df27 5861 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5862 break;
5863 case 4: /* VEOR */
dd8fbd78 5864 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5865 break;
5866 case 5: /* VBSL */
dd8fbd78
FN
5867 tmp3 = neon_load_reg(rd, pass);
5868 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5869 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5870 break;
5871 case 6: /* VBIT */
dd8fbd78
FN
5872 tmp3 = neon_load_reg(rd, pass);
5873 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5874 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5875 break;
5876 case 7: /* VBIF */
dd8fbd78
FN
5877 tmp3 = neon_load_reg(rd, pass);
5878 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5879 tcg_temp_free_i32(tmp3);
9ee6e8bb 5880 break;
2c0262af
FB
5881 }
5882 break;
62698be3 5883 case NEON_3R_VHSUB:
9ee6e8bb
PB
5884 GEN_NEON_INTEGER_OP(hsub);
5885 break;
62698be3 5886 case NEON_3R_VQSUB:
02da0b2d 5887 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5888 break;
62698be3 5889 case NEON_3R_VCGT:
9ee6e8bb
PB
5890 GEN_NEON_INTEGER_OP(cgt);
5891 break;
62698be3 5892 case NEON_3R_VCGE:
9ee6e8bb
PB
5893 GEN_NEON_INTEGER_OP(cge);
5894 break;
62698be3 5895 case NEON_3R_VSHL:
ad69471c 5896 GEN_NEON_INTEGER_OP(shl);
2c0262af 5897 break;
62698be3 5898 case NEON_3R_VQSHL:
02da0b2d 5899 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5900 break;
62698be3 5901 case NEON_3R_VRSHL:
ad69471c 5902 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5903 break;
62698be3 5904 case NEON_3R_VQRSHL:
02da0b2d 5905 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5906 break;
62698be3 5907 case NEON_3R_VMAX:
9ee6e8bb
PB
5908 GEN_NEON_INTEGER_OP(max);
5909 break;
62698be3 5910 case NEON_3R_VMIN:
9ee6e8bb
PB
5911 GEN_NEON_INTEGER_OP(min);
5912 break;
62698be3 5913 case NEON_3R_VABD:
9ee6e8bb
PB
5914 GEN_NEON_INTEGER_OP(abd);
5915 break;
62698be3 5916 case NEON_3R_VABA:
9ee6e8bb 5917 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5918 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5919 tmp2 = neon_load_reg(rd, pass);
5920 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5921 break;
62698be3 5922 case NEON_3R_VADD_VSUB:
9ee6e8bb 5923 if (!u) { /* VADD */
62698be3 5924 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5925 } else { /* VSUB */
5926 switch (size) {
dd8fbd78
FN
5927 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5928 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5929 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5930 default: abort();
9ee6e8bb
PB
5931 }
5932 }
5933 break;
62698be3 5934 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5935 if (!u) { /* VTST */
5936 switch (size) {
dd8fbd78
FN
5937 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5938 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5939 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5940 default: abort();
9ee6e8bb
PB
5941 }
5942 } else { /* VCEQ */
5943 switch (size) {
dd8fbd78
FN
5944 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5945 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5946 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5947 default: abort();
9ee6e8bb
PB
5948 }
5949 }
5950 break;
62698be3 5951 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5952 switch (size) {
dd8fbd78
FN
5953 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5954 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5955 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5956 default: abort();
9ee6e8bb 5957 }
7d1b0095 5958 tcg_temp_free_i32(tmp2);
dd8fbd78 5959 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5960 if (u) { /* VMLS */
dd8fbd78 5961 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5962 } else { /* VMLA */
dd8fbd78 5963 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5964 }
5965 break;
62698be3 5966 case NEON_3R_VMUL:
9ee6e8bb 5967 if (u) { /* polynomial */
dd8fbd78 5968 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5969 } else { /* Integer */
5970 switch (size) {
dd8fbd78
FN
5971 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5972 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5973 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5974 default: abort();
9ee6e8bb
PB
5975 }
5976 }
5977 break;
62698be3 5978 case NEON_3R_VPMAX:
9ee6e8bb
PB
5979 GEN_NEON_INTEGER_OP(pmax);
5980 break;
62698be3 5981 case NEON_3R_VPMIN:
9ee6e8bb
PB
5982 GEN_NEON_INTEGER_OP(pmin);
5983 break;
62698be3 5984 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5985 if (!u) { /* VQDMULH */
5986 switch (size) {
02da0b2d
PM
5987 case 1:
5988 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5989 break;
5990 case 2:
5991 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5992 break;
62698be3 5993 default: abort();
9ee6e8bb 5994 }
62698be3 5995 } else { /* VQRDMULH */
9ee6e8bb 5996 switch (size) {
02da0b2d
PM
5997 case 1:
5998 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5999 break;
6000 case 2:
6001 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6002 break;
62698be3 6003 default: abort();
9ee6e8bb
PB
6004 }
6005 }
6006 break;
62698be3 6007 case NEON_3R_VPADD:
9ee6e8bb 6008 switch (size) {
dd8fbd78
FN
6009 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6010 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6011 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6012 default: abort();
9ee6e8bb
PB
6013 }
6014 break;
62698be3 6015 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6016 {
6017 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6018 switch ((u << 2) | size) {
6019 case 0: /* VADD */
aa47cfdd
PM
6020 case 4: /* VPADD */
6021 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6022 break;
6023 case 2: /* VSUB */
aa47cfdd 6024 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6025 break;
6026 case 6: /* VABD */
aa47cfdd 6027 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6028 break;
6029 default:
62698be3 6030 abort();
9ee6e8bb 6031 }
aa47cfdd 6032 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6033 break;
aa47cfdd 6034 }
62698be3 6035 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6036 {
6037 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6038 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6039 if (!u) {
7d1b0095 6040 tcg_temp_free_i32(tmp2);
dd8fbd78 6041 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6042 if (size == 0) {
aa47cfdd 6043 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6044 } else {
aa47cfdd 6045 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6046 }
6047 }
aa47cfdd 6048 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6049 break;
aa47cfdd 6050 }
62698be3 6051 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6052 {
6053 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6054 if (!u) {
aa47cfdd 6055 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6056 } else {
aa47cfdd
PM
6057 if (size == 0) {
6058 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6059 } else {
6060 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6061 }
b5ff1b31 6062 }
aa47cfdd 6063 tcg_temp_free_ptr(fpstatus);
2c0262af 6064 break;
aa47cfdd 6065 }
62698be3 6066 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6067 {
6068 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6069 if (size == 0) {
6070 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6071 } else {
6072 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6073 }
6074 tcg_temp_free_ptr(fpstatus);
2c0262af 6075 break;
aa47cfdd 6076 }
62698be3 6077 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6078 {
6079 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6080 if (size == 0) {
f71a2ae5 6081 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6082 } else {
f71a2ae5 6083 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6084 }
6085 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6086 break;
aa47cfdd 6087 }
505935fc
WN
6088 case NEON_3R_FLOAT_MISC:
6089 if (u) {
6090 /* VMAXNM/VMINNM */
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6092 if (size == 0) {
f71a2ae5 6093 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6094 } else {
f71a2ae5 6095 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6096 }
6097 tcg_temp_free_ptr(fpstatus);
6098 } else {
6099 if (size == 0) {
6100 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6101 } else {
6102 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6103 }
6104 }
2c0262af 6105 break;
da97f52c
PM
6106 case NEON_3R_VFM:
6107 {
6108 /* VFMA, VFMS: fused multiply-add */
6109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6110 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6111 if (size) {
6112 /* VFMS */
6113 gen_helper_vfp_negs(tmp, tmp);
6114 }
6115 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6116 tcg_temp_free_i32(tmp3);
6117 tcg_temp_free_ptr(fpstatus);
6118 break;
6119 }
9ee6e8bb
PB
6120 default:
6121 abort();
2c0262af 6122 }
7d1b0095 6123 tcg_temp_free_i32(tmp2);
dd8fbd78 6124
9ee6e8bb
PB
6125 /* Save the result. For elementwise operations we can put it
6126 straight into the destination register. For pairwise operations
6127 we have to be careful to avoid clobbering the source operands. */
6128 if (pairwise && rd == rm) {
dd8fbd78 6129 neon_store_scratch(pass, tmp);
9ee6e8bb 6130 } else {
dd8fbd78 6131 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6132 }
6133
6134 } /* for pass */
6135 if (pairwise && rd == rm) {
6136 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6137 tmp = neon_load_scratch(pass);
6138 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6139 }
6140 }
ad69471c 6141 /* End of 3 register same size operations. */
9ee6e8bb
PB
6142 } else if (insn & (1 << 4)) {
6143 if ((insn & 0x00380080) != 0) {
6144 /* Two registers and shift. */
6145 op = (insn >> 8) & 0xf;
6146 if (insn & (1 << 7)) {
cc13115b
PM
6147 /* 64-bit shift. */
6148 if (op > 7) {
6149 return 1;
6150 }
9ee6e8bb
PB
6151 size = 3;
6152 } else {
6153 size = 2;
6154 while ((insn & (1 << (size + 19))) == 0)
6155 size--;
6156 }
6157 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6158 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6159 by immediate using the variable shift operations. */
6160 if (op < 8) {
6161 /* Shift by immediate:
6162 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6163 if (q && ((rd | rm) & 1)) {
6164 return 1;
6165 }
6166 if (!u && (op == 4 || op == 6)) {
6167 return 1;
6168 }
9ee6e8bb
PB
6169 /* Right shifts are encoded as N - shift, where N is the
6170 element size in bits. */
6171 if (op <= 4)
6172 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6173 if (size == 3) {
6174 count = q + 1;
6175 } else {
6176 count = q ? 4: 2;
6177 }
6178 switch (size) {
6179 case 0:
6180 imm = (uint8_t) shift;
6181 imm |= imm << 8;
6182 imm |= imm << 16;
6183 break;
6184 case 1:
6185 imm = (uint16_t) shift;
6186 imm |= imm << 16;
6187 break;
6188 case 2:
6189 case 3:
6190 imm = shift;
6191 break;
6192 default:
6193 abort();
6194 }
6195
6196 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6197 if (size == 3) {
6198 neon_load_reg64(cpu_V0, rm + pass);
6199 tcg_gen_movi_i64(cpu_V1, imm);
6200 switch (op) {
6201 case 0: /* VSHR */
6202 case 1: /* VSRA */
6203 if (u)
6204 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6205 else
ad69471c 6206 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6207 break;
ad69471c
PB
6208 case 2: /* VRSHR */
6209 case 3: /* VRSRA */
6210 if (u)
6211 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6212 else
ad69471c 6213 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6214 break;
ad69471c 6215 case 4: /* VSRI */
ad69471c
PB
6216 case 5: /* VSHL, VSLI */
6217 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6218 break;
0322b26e 6219 case 6: /* VQSHLU */
02da0b2d
PM
6220 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6221 cpu_V0, cpu_V1);
ad69471c 6222 break;
0322b26e
PM
6223 case 7: /* VQSHL */
6224 if (u) {
02da0b2d 6225 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6226 cpu_V0, cpu_V1);
6227 } else {
02da0b2d 6228 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6229 cpu_V0, cpu_V1);
6230 }
9ee6e8bb 6231 break;
9ee6e8bb 6232 }
ad69471c
PB
6233 if (op == 1 || op == 3) {
6234 /* Accumulate. */
5371cb81 6235 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6236 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6237 } else if (op == 4 || (op == 5 && u)) {
6238 /* Insert */
923e6509
CL
6239 neon_load_reg64(cpu_V1, rd + pass);
6240 uint64_t mask;
6241 if (shift < -63 || shift > 63) {
6242 mask = 0;
6243 } else {
6244 if (op == 4) {
6245 mask = 0xffffffffffffffffull >> -shift;
6246 } else {
6247 mask = 0xffffffffffffffffull << shift;
6248 }
6249 }
6250 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6251 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6252 }
6253 neon_store_reg64(cpu_V0, rd + pass);
6254 } else { /* size < 3 */
6255 /* Operands in T0 and T1. */
dd8fbd78 6256 tmp = neon_load_reg(rm, pass);
7d1b0095 6257 tmp2 = tcg_temp_new_i32();
dd8fbd78 6258 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6259 switch (op) {
6260 case 0: /* VSHR */
6261 case 1: /* VSRA */
6262 GEN_NEON_INTEGER_OP(shl);
6263 break;
6264 case 2: /* VRSHR */
6265 case 3: /* VRSRA */
6266 GEN_NEON_INTEGER_OP(rshl);
6267 break;
6268 case 4: /* VSRI */
ad69471c
PB
6269 case 5: /* VSHL, VSLI */
6270 switch (size) {
dd8fbd78
FN
6271 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6272 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6273 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6274 default: abort();
ad69471c
PB
6275 }
6276 break;
0322b26e 6277 case 6: /* VQSHLU */
ad69471c 6278 switch (size) {
0322b26e 6279 case 0:
02da0b2d
PM
6280 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6281 tmp, tmp2);
0322b26e
PM
6282 break;
6283 case 1:
02da0b2d
PM
6284 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6285 tmp, tmp2);
0322b26e
PM
6286 break;
6287 case 2:
02da0b2d
PM
6288 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6289 tmp, tmp2);
0322b26e
PM
6290 break;
6291 default:
cc13115b 6292 abort();
ad69471c
PB
6293 }
6294 break;
0322b26e 6295 case 7: /* VQSHL */
02da0b2d 6296 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6297 break;
ad69471c 6298 }
7d1b0095 6299 tcg_temp_free_i32(tmp2);
ad69471c
PB
6300
6301 if (op == 1 || op == 3) {
6302 /* Accumulate. */
dd8fbd78 6303 tmp2 = neon_load_reg(rd, pass);
5371cb81 6304 gen_neon_add(size, tmp, tmp2);
7d1b0095 6305 tcg_temp_free_i32(tmp2);
ad69471c
PB
6306 } else if (op == 4 || (op == 5 && u)) {
6307 /* Insert */
6308 switch (size) {
6309 case 0:
6310 if (op == 4)
ca9a32e4 6311 mask = 0xff >> -shift;
ad69471c 6312 else
ca9a32e4
JR
6313 mask = (uint8_t)(0xff << shift);
6314 mask |= mask << 8;
6315 mask |= mask << 16;
ad69471c
PB
6316 break;
6317 case 1:
6318 if (op == 4)
ca9a32e4 6319 mask = 0xffff >> -shift;
ad69471c 6320 else
ca9a32e4
JR
6321 mask = (uint16_t)(0xffff << shift);
6322 mask |= mask << 16;
ad69471c
PB
6323 break;
6324 case 2:
ca9a32e4
JR
6325 if (shift < -31 || shift > 31) {
6326 mask = 0;
6327 } else {
6328 if (op == 4)
6329 mask = 0xffffffffu >> -shift;
6330 else
6331 mask = 0xffffffffu << shift;
6332 }
ad69471c
PB
6333 break;
6334 default:
6335 abort();
6336 }
dd8fbd78 6337 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6338 tcg_gen_andi_i32(tmp, tmp, mask);
6339 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6340 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6341 tcg_temp_free_i32(tmp2);
ad69471c 6342 }
dd8fbd78 6343 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6344 }
6345 } /* for pass */
6346 } else if (op < 10) {
ad69471c 6347 /* Shift by immediate and narrow:
9ee6e8bb 6348 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6349 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6350 if (rm & 1) {
6351 return 1;
6352 }
9ee6e8bb
PB
6353 shift = shift - (1 << (size + 3));
6354 size++;
92cdfaeb 6355 if (size == 3) {
a7812ae4 6356 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6357 neon_load_reg64(cpu_V0, rm);
6358 neon_load_reg64(cpu_V1, rm + 1);
6359 for (pass = 0; pass < 2; pass++) {
6360 TCGv_i64 in;
6361 if (pass == 0) {
6362 in = cpu_V0;
6363 } else {
6364 in = cpu_V1;
6365 }
ad69471c 6366 if (q) {
0b36f4cd 6367 if (input_unsigned) {
92cdfaeb 6368 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6369 } else {
92cdfaeb 6370 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6371 }
ad69471c 6372 } else {
0b36f4cd 6373 if (input_unsigned) {
92cdfaeb 6374 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6375 } else {
92cdfaeb 6376 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6377 }
ad69471c 6378 }
7d1b0095 6379 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6380 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6381 neon_store_reg(rd, pass, tmp);
6382 } /* for pass */
6383 tcg_temp_free_i64(tmp64);
6384 } else {
6385 if (size == 1) {
6386 imm = (uint16_t)shift;
6387 imm |= imm << 16;
2c0262af 6388 } else {
92cdfaeb
PM
6389 /* size == 2 */
6390 imm = (uint32_t)shift;
6391 }
6392 tmp2 = tcg_const_i32(imm);
6393 tmp4 = neon_load_reg(rm + 1, 0);
6394 tmp5 = neon_load_reg(rm + 1, 1);
6395 for (pass = 0; pass < 2; pass++) {
6396 if (pass == 0) {
6397 tmp = neon_load_reg(rm, 0);
6398 } else {
6399 tmp = tmp4;
6400 }
0b36f4cd
CL
6401 gen_neon_shift_narrow(size, tmp, tmp2, q,
6402 input_unsigned);
92cdfaeb
PM
6403 if (pass == 0) {
6404 tmp3 = neon_load_reg(rm, 1);
6405 } else {
6406 tmp3 = tmp5;
6407 }
0b36f4cd
CL
6408 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6409 input_unsigned);
36aa55dc 6410 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6411 tcg_temp_free_i32(tmp);
6412 tcg_temp_free_i32(tmp3);
6413 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6414 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6415 neon_store_reg(rd, pass, tmp);
6416 } /* for pass */
c6067f04 6417 tcg_temp_free_i32(tmp2);
b75263d6 6418 }
9ee6e8bb 6419 } else if (op == 10) {
cc13115b
PM
6420 /* VSHLL, VMOVL */
6421 if (q || (rd & 1)) {
9ee6e8bb 6422 return 1;
cc13115b 6423 }
ad69471c
PB
6424 tmp = neon_load_reg(rm, 0);
6425 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6426 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6427 if (pass == 1)
6428 tmp = tmp2;
6429
6430 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6431
9ee6e8bb
PB
6432 if (shift != 0) {
6433 /* The shift is less than the width of the source
ad69471c
PB
6434 type, so we can just shift the whole register. */
6435 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6436 /* Widen the result of shift: we need to clear
6437 * the potential overflow bits resulting from
6438 * left bits of the narrow input appearing as
6439 * right bits of left the neighbour narrow
6440 * input. */
ad69471c
PB
6441 if (size < 2 || !u) {
6442 uint64_t imm64;
6443 if (size == 0) {
6444 imm = (0xffu >> (8 - shift));
6445 imm |= imm << 16;
acdf01ef 6446 } else if (size == 1) {
ad69471c 6447 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6448 } else {
6449 /* size == 2 */
6450 imm = 0xffffffff >> (32 - shift);
6451 }
6452 if (size < 2) {
6453 imm64 = imm | (((uint64_t)imm) << 32);
6454 } else {
6455 imm64 = imm;
9ee6e8bb 6456 }
acdf01ef 6457 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6458 }
6459 }
ad69471c 6460 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6461 }
f73534a5 6462 } else if (op >= 14) {
9ee6e8bb 6463 /* VCVT fixed-point. */
cc13115b
PM
6464 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6465 return 1;
6466 }
f73534a5
PM
6467 /* We have already masked out the must-be-1 top bit of imm6,
6468 * hence this 32-shift where the ARM ARM has 64-imm6.
6469 */
6470 shift = 32 - shift;
9ee6e8bb 6471 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6472 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6473 if (!(op & 1)) {
9ee6e8bb 6474 if (u)
5500b06c 6475 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6476 else
5500b06c 6477 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6478 } else {
6479 if (u)
5500b06c 6480 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6481 else
5500b06c 6482 gen_vfp_tosl(0, shift, 1);
2c0262af 6483 }
4373f3ce 6484 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6485 }
6486 } else {
9ee6e8bb
PB
6487 return 1;
6488 }
6489 } else { /* (insn & 0x00380080) == 0 */
6490 int invert;
7d80fee5
PM
6491 if (q && (rd & 1)) {
6492 return 1;
6493 }
9ee6e8bb
PB
6494
6495 op = (insn >> 8) & 0xf;
6496 /* One register and immediate. */
6497 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6498 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6499 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6500 * We choose to not special-case this and will behave as if a
6501 * valid constant encoding of 0 had been given.
6502 */
9ee6e8bb
PB
6503 switch (op) {
6504 case 0: case 1:
6505 /* no-op */
6506 break;
6507 case 2: case 3:
6508 imm <<= 8;
6509 break;
6510 case 4: case 5:
6511 imm <<= 16;
6512 break;
6513 case 6: case 7:
6514 imm <<= 24;
6515 break;
6516 case 8: case 9:
6517 imm |= imm << 16;
6518 break;
6519 case 10: case 11:
6520 imm = (imm << 8) | (imm << 24);
6521 break;
6522 case 12:
8e31209e 6523 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6524 break;
6525 case 13:
6526 imm = (imm << 16) | 0xffff;
6527 break;
6528 case 14:
6529 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6530 if (invert)
6531 imm = ~imm;
6532 break;
6533 case 15:
7d80fee5
PM
6534 if (invert) {
6535 return 1;
6536 }
9ee6e8bb
PB
6537 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6538 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6539 break;
6540 }
6541 if (invert)
6542 imm = ~imm;
6543
9ee6e8bb
PB
6544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6545 if (op & 1 && op < 12) {
ad69471c 6546 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6547 if (invert) {
6548 /* The immediate value has already been inverted, so
6549 BIC becomes AND. */
ad69471c 6550 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6551 } else {
ad69471c 6552 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6553 }
9ee6e8bb 6554 } else {
ad69471c 6555 /* VMOV, VMVN. */
7d1b0095 6556 tmp = tcg_temp_new_i32();
9ee6e8bb 6557 if (op == 14 && invert) {
a5a14945 6558 int n;
ad69471c
PB
6559 uint32_t val;
6560 val = 0;
9ee6e8bb
PB
6561 for (n = 0; n < 4; n++) {
6562 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6563 val |= 0xff << (n * 8);
9ee6e8bb 6564 }
ad69471c
PB
6565 tcg_gen_movi_i32(tmp, val);
6566 } else {
6567 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6568 }
9ee6e8bb 6569 }
ad69471c 6570 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6571 }
6572 }
e4b3861d 6573 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6574 if (size != 3) {
6575 op = (insn >> 8) & 0xf;
6576 if ((insn & (1 << 6)) == 0) {
6577 /* Three registers of different lengths. */
6578 int src1_wide;
6579 int src2_wide;
6580 int prewiden;
526d0096
PM
6581 /* undefreq: bit 0 : UNDEF if size == 0
6582 * bit 1 : UNDEF if size == 1
6583 * bit 2 : UNDEF if size == 2
6584 * bit 3 : UNDEF if U == 1
6585 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6586 */
6587 int undefreq;
6588 /* prewiden, src1_wide, src2_wide, undefreq */
6589 static const int neon_3reg_wide[16][4] = {
6590 {1, 0, 0, 0}, /* VADDL */
6591 {1, 1, 0, 0}, /* VADDW */
6592 {1, 0, 0, 0}, /* VSUBL */
6593 {1, 1, 0, 0}, /* VSUBW */
6594 {0, 1, 1, 0}, /* VADDHN */
6595 {0, 0, 0, 0}, /* VABAL */
6596 {0, 1, 1, 0}, /* VSUBHN */
6597 {0, 0, 0, 0}, /* VABDL */
6598 {0, 0, 0, 0}, /* VMLAL */
526d0096 6599 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6600 {0, 0, 0, 0}, /* VMLSL */
526d0096 6601 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6602 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6603 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6604 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6605 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6606 };
6607
6608 prewiden = neon_3reg_wide[op][0];
6609 src1_wide = neon_3reg_wide[op][1];
6610 src2_wide = neon_3reg_wide[op][2];
695272dc 6611 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6612
526d0096
PM
6613 if ((undefreq & (1 << size)) ||
6614 ((undefreq & 8) && u)) {
695272dc
PM
6615 return 1;
6616 }
6617 if ((src1_wide && (rn & 1)) ||
6618 (src2_wide && (rm & 1)) ||
6619 (!src2_wide && (rd & 1))) {
ad69471c 6620 return 1;
695272dc 6621 }
ad69471c 6622
4e624eda
PM
6623 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6624 * outside the loop below as it only performs a single pass.
6625 */
6626 if (op == 14 && size == 2) {
6627 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6628
d614a513 6629 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6630 return 1;
6631 }
6632 tcg_rn = tcg_temp_new_i64();
6633 tcg_rm = tcg_temp_new_i64();
6634 tcg_rd = tcg_temp_new_i64();
6635 neon_load_reg64(tcg_rn, rn);
6636 neon_load_reg64(tcg_rm, rm);
6637 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6638 neon_store_reg64(tcg_rd, rd);
6639 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6640 neon_store_reg64(tcg_rd, rd + 1);
6641 tcg_temp_free_i64(tcg_rn);
6642 tcg_temp_free_i64(tcg_rm);
6643 tcg_temp_free_i64(tcg_rd);
6644 return 0;
6645 }
6646
9ee6e8bb
PB
6647 /* Avoid overlapping operands. Wide source operands are
6648 always aligned so will never overlap with wide
6649 destinations in problematic ways. */
8f8e3aa4 6650 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6651 tmp = neon_load_reg(rm, 1);
6652 neon_store_scratch(2, tmp);
8f8e3aa4 6653 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6654 tmp = neon_load_reg(rn, 1);
6655 neon_store_scratch(2, tmp);
9ee6e8bb 6656 }
f764718d 6657 tmp3 = NULL;
9ee6e8bb 6658 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6659 if (src1_wide) {
6660 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6661 tmp = NULL;
9ee6e8bb 6662 } else {
ad69471c 6663 if (pass == 1 && rd == rn) {
dd8fbd78 6664 tmp = neon_load_scratch(2);
9ee6e8bb 6665 } else {
ad69471c
PB
6666 tmp = neon_load_reg(rn, pass);
6667 }
6668 if (prewiden) {
6669 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6670 }
6671 }
ad69471c
PB
6672 if (src2_wide) {
6673 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6674 tmp2 = NULL;
9ee6e8bb 6675 } else {
ad69471c 6676 if (pass == 1 && rd == rm) {
dd8fbd78 6677 tmp2 = neon_load_scratch(2);
9ee6e8bb 6678 } else {
ad69471c
PB
6679 tmp2 = neon_load_reg(rm, pass);
6680 }
6681 if (prewiden) {
6682 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6683 }
9ee6e8bb
PB
6684 }
6685 switch (op) {
6686 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6687 gen_neon_addl(size);
9ee6e8bb 6688 break;
79b0e534 6689 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6690 gen_neon_subl(size);
9ee6e8bb
PB
6691 break;
6692 case 5: case 7: /* VABAL, VABDL */
6693 switch ((size << 1) | u) {
ad69471c
PB
6694 case 0:
6695 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6696 break;
6697 case 1:
6698 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6699 break;
6700 case 2:
6701 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6702 break;
6703 case 3:
6704 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6705 break;
6706 case 4:
6707 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6708 break;
6709 case 5:
6710 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6711 break;
9ee6e8bb
PB
6712 default: abort();
6713 }
7d1b0095
PM
6714 tcg_temp_free_i32(tmp2);
6715 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6716 break;
6717 case 8: case 9: case 10: case 11: case 12: case 13:
6718 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6719 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6720 break;
6721 case 14: /* Polynomial VMULL */
e5ca24cb 6722 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6723 tcg_temp_free_i32(tmp2);
6724 tcg_temp_free_i32(tmp);
e5ca24cb 6725 break;
695272dc
PM
6726 default: /* 15 is RESERVED: caught earlier */
6727 abort();
9ee6e8bb 6728 }
ebcd88ce
PM
6729 if (op == 13) {
6730 /* VQDMULL */
6731 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6732 neon_store_reg64(cpu_V0, rd + pass);
6733 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6734 /* Accumulate. */
ebcd88ce 6735 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6736 switch (op) {
4dc064e6
PM
6737 case 10: /* VMLSL */
6738 gen_neon_negl(cpu_V0, size);
6739 /* Fall through */
6740 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6741 gen_neon_addl(size);
9ee6e8bb
PB
6742 break;
6743 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6744 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6745 if (op == 11) {
6746 gen_neon_negl(cpu_V0, size);
6747 }
ad69471c
PB
6748 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6749 break;
9ee6e8bb
PB
6750 default:
6751 abort();
6752 }
ad69471c 6753 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6754 } else if (op == 4 || op == 6) {
6755 /* Narrowing operation. */
7d1b0095 6756 tmp = tcg_temp_new_i32();
79b0e534 6757 if (!u) {
9ee6e8bb 6758 switch (size) {
ad69471c
PB
6759 case 0:
6760 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6761 break;
6762 case 1:
6763 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6764 break;
6765 case 2:
6766 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6767 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6768 break;
9ee6e8bb
PB
6769 default: abort();
6770 }
6771 } else {
6772 switch (size) {
ad69471c
PB
6773 case 0:
6774 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6775 break;
6776 case 1:
6777 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6778 break;
6779 case 2:
6780 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6781 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6782 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6783 break;
9ee6e8bb
PB
6784 default: abort();
6785 }
6786 }
ad69471c
PB
6787 if (pass == 0) {
6788 tmp3 = tmp;
6789 } else {
6790 neon_store_reg(rd, 0, tmp3);
6791 neon_store_reg(rd, 1, tmp);
6792 }
9ee6e8bb
PB
6793 } else {
6794 /* Write back the result. */
ad69471c 6795 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6796 }
6797 }
6798 } else {
3e3326df
PM
6799 /* Two registers and a scalar. NB that for ops of this form
6800 * the ARM ARM labels bit 24 as Q, but it is in our variable
6801 * 'u', not 'q'.
6802 */
6803 if (size == 0) {
6804 return 1;
6805 }
9ee6e8bb 6806 switch (op) {
9ee6e8bb 6807 case 1: /* Float VMLA scalar */
9ee6e8bb 6808 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6809 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6810 if (size == 1) {
6811 return 1;
6812 }
6813 /* fall through */
6814 case 0: /* Integer VMLA scalar */
6815 case 4: /* Integer VMLS scalar */
6816 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6817 case 12: /* VQDMULH scalar */
6818 case 13: /* VQRDMULH scalar */
3e3326df
PM
6819 if (u && ((rd | rn) & 1)) {
6820 return 1;
6821 }
dd8fbd78
FN
6822 tmp = neon_get_scalar(size, rm);
6823 neon_store_scratch(0, tmp);
9ee6e8bb 6824 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6825 tmp = neon_load_scratch(0);
6826 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6827 if (op == 12) {
6828 if (size == 1) {
02da0b2d 6829 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6830 } else {
02da0b2d 6831 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6832 }
6833 } else if (op == 13) {
6834 if (size == 1) {
02da0b2d 6835 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6836 } else {
02da0b2d 6837 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6838 }
6839 } else if (op & 1) {
aa47cfdd
PM
6840 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6841 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6842 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6843 } else {
6844 switch (size) {
dd8fbd78
FN
6845 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6846 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6847 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6848 default: abort();
9ee6e8bb
PB
6849 }
6850 }
7d1b0095 6851 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6852 if (op < 8) {
6853 /* Accumulate. */
dd8fbd78 6854 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6855 switch (op) {
6856 case 0:
dd8fbd78 6857 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6858 break;
6859 case 1:
aa47cfdd
PM
6860 {
6861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6862 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6863 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6864 break;
aa47cfdd 6865 }
9ee6e8bb 6866 case 4:
dd8fbd78 6867 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6868 break;
6869 case 5:
aa47cfdd
PM
6870 {
6871 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6872 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6873 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6874 break;
aa47cfdd 6875 }
9ee6e8bb
PB
6876 default:
6877 abort();
6878 }
7d1b0095 6879 tcg_temp_free_i32(tmp2);
9ee6e8bb 6880 }
dd8fbd78 6881 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6882 }
6883 break;
9ee6e8bb 6884 case 3: /* VQDMLAL scalar */
9ee6e8bb 6885 case 7: /* VQDMLSL scalar */
9ee6e8bb 6886 case 11: /* VQDMULL scalar */
3e3326df 6887 if (u == 1) {
ad69471c 6888 return 1;
3e3326df
PM
6889 }
6890 /* fall through */
6891 case 2: /* VMLAL sclar */
6892 case 6: /* VMLSL scalar */
6893 case 10: /* VMULL scalar */
6894 if (rd & 1) {
6895 return 1;
6896 }
dd8fbd78 6897 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6898 /* We need a copy of tmp2 because gen_neon_mull
6899 * deletes it during pass 0. */
7d1b0095 6900 tmp4 = tcg_temp_new_i32();
c6067f04 6901 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6902 tmp3 = neon_load_reg(rn, 1);
ad69471c 6903
9ee6e8bb 6904 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6905 if (pass == 0) {
6906 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6907 } else {
dd8fbd78 6908 tmp = tmp3;
c6067f04 6909 tmp2 = tmp4;
9ee6e8bb 6910 }
ad69471c 6911 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6912 if (op != 11) {
6913 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6914 }
9ee6e8bb 6915 switch (op) {
4dc064e6
PM
6916 case 6:
6917 gen_neon_negl(cpu_V0, size);
6918 /* Fall through */
6919 case 2:
ad69471c 6920 gen_neon_addl(size);
9ee6e8bb
PB
6921 break;
6922 case 3: case 7:
ad69471c 6923 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6924 if (op == 7) {
6925 gen_neon_negl(cpu_V0, size);
6926 }
ad69471c 6927 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6928 break;
6929 case 10:
6930 /* no-op */
6931 break;
6932 case 11:
ad69471c 6933 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6934 break;
6935 default:
6936 abort();
6937 }
ad69471c 6938 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6939 }
dd8fbd78 6940
dd8fbd78 6941
9ee6e8bb
PB
6942 break;
6943 default: /* 14 and 15 are RESERVED */
6944 return 1;
6945 }
6946 }
6947 } else { /* size == 3 */
6948 if (!u) {
6949 /* Extract. */
9ee6e8bb 6950 imm = (insn >> 8) & 0xf;
ad69471c
PB
6951
6952 if (imm > 7 && !q)
6953 return 1;
6954
52579ea1
PM
6955 if (q && ((rd | rn | rm) & 1)) {
6956 return 1;
6957 }
6958
ad69471c
PB
6959 if (imm == 0) {
6960 neon_load_reg64(cpu_V0, rn);
6961 if (q) {
6962 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6963 }
ad69471c
PB
6964 } else if (imm == 8) {
6965 neon_load_reg64(cpu_V0, rn + 1);
6966 if (q) {
6967 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6968 }
ad69471c 6969 } else if (q) {
a7812ae4 6970 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6971 if (imm < 8) {
6972 neon_load_reg64(cpu_V0, rn);
a7812ae4 6973 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6974 } else {
6975 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6976 neon_load_reg64(tmp64, rm);
ad69471c
PB
6977 }
6978 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6979 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6980 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6981 if (imm < 8) {
6982 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6983 } else {
ad69471c
PB
6984 neon_load_reg64(cpu_V1, rm + 1);
6985 imm -= 8;
9ee6e8bb 6986 }
ad69471c 6987 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6988 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6989 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6990 tcg_temp_free_i64(tmp64);
ad69471c 6991 } else {
a7812ae4 6992 /* BUGFIX */
ad69471c 6993 neon_load_reg64(cpu_V0, rn);
a7812ae4 6994 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6995 neon_load_reg64(cpu_V1, rm);
a7812ae4 6996 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6997 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6998 }
6999 neon_store_reg64(cpu_V0, rd);
7000 if (q) {
7001 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7002 }
7003 } else if ((insn & (1 << 11)) == 0) {
7004 /* Two register misc. */
7005 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7006 size = (insn >> 18) & 3;
600b828c
PM
7007 /* UNDEF for unknown op values and bad op-size combinations */
7008 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7009 return 1;
7010 }
fe8fcf3d
PM
7011 if (neon_2rm_is_v8_op(op) &&
7012 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7013 return 1;
7014 }
fc2a9b37
PM
7015 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7016 q && ((rm | rd) & 1)) {
7017 return 1;
7018 }
9ee6e8bb 7019 switch (op) {
600b828c 7020 case NEON_2RM_VREV64:
9ee6e8bb 7021 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7022 tmp = neon_load_reg(rm, pass * 2);
7023 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7024 switch (size) {
dd8fbd78
FN
7025 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7026 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7027 case 2: /* no-op */ break;
7028 default: abort();
7029 }
dd8fbd78 7030 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7031 if (size == 2) {
dd8fbd78 7032 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7033 } else {
9ee6e8bb 7034 switch (size) {
dd8fbd78
FN
7035 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7036 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7037 default: abort();
7038 }
dd8fbd78 7039 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7040 }
7041 }
7042 break;
600b828c
PM
7043 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7044 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7045 for (pass = 0; pass < q + 1; pass++) {
7046 tmp = neon_load_reg(rm, pass * 2);
7047 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7048 tmp = neon_load_reg(rm, pass * 2 + 1);
7049 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7050 switch (size) {
7051 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7052 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7053 case 2: tcg_gen_add_i64(CPU_V001); break;
7054 default: abort();
7055 }
600b828c 7056 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7057 /* Accumulate. */
ad69471c
PB
7058 neon_load_reg64(cpu_V1, rd + pass);
7059 gen_neon_addl(size);
9ee6e8bb 7060 }
ad69471c 7061 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7062 }
7063 break;
600b828c 7064 case NEON_2RM_VTRN:
9ee6e8bb 7065 if (size == 2) {
a5a14945 7066 int n;
9ee6e8bb 7067 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7068 tmp = neon_load_reg(rm, n);
7069 tmp2 = neon_load_reg(rd, n + 1);
7070 neon_store_reg(rm, n, tmp2);
7071 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7072 }
7073 } else {
7074 goto elementwise;
7075 }
7076 break;
600b828c 7077 case NEON_2RM_VUZP:
02acedf9 7078 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7079 return 1;
9ee6e8bb
PB
7080 }
7081 break;
600b828c 7082 case NEON_2RM_VZIP:
d68a6f3a 7083 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7084 return 1;
9ee6e8bb
PB
7085 }
7086 break;
600b828c
PM
7087 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7088 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7089 if (rm & 1) {
7090 return 1;
7091 }
f764718d 7092 tmp2 = NULL;
9ee6e8bb 7093 for (pass = 0; pass < 2; pass++) {
ad69471c 7094 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7095 tmp = tcg_temp_new_i32();
600b828c
PM
7096 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7097 tmp, cpu_V0);
ad69471c
PB
7098 if (pass == 0) {
7099 tmp2 = tmp;
7100 } else {
7101 neon_store_reg(rd, 0, tmp2);
7102 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7103 }
9ee6e8bb
PB
7104 }
7105 break;
600b828c 7106 case NEON_2RM_VSHLL:
fc2a9b37 7107 if (q || (rd & 1)) {
9ee6e8bb 7108 return 1;
600b828c 7109 }
ad69471c
PB
7110 tmp = neon_load_reg(rm, 0);
7111 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7112 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7113 if (pass == 1)
7114 tmp = tmp2;
7115 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7116 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7117 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7118 }
7119 break;
600b828c 7120 case NEON_2RM_VCVT_F16_F32:
d614a513 7121 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7122 q || (rm & 1)) {
7123 return 1;
7124 }
7d1b0095
PM
7125 tmp = tcg_temp_new_i32();
7126 tmp2 = tcg_temp_new_i32();
60011498 7127 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7128 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7129 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7130 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7131 tcg_gen_shli_i32(tmp2, tmp2, 16);
7132 tcg_gen_or_i32(tmp2, tmp2, tmp);
7133 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7134 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7135 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7136 neon_store_reg(rd, 0, tmp2);
7d1b0095 7137 tmp2 = tcg_temp_new_i32();
2d981da7 7138 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7139 tcg_gen_shli_i32(tmp2, tmp2, 16);
7140 tcg_gen_or_i32(tmp2, tmp2, tmp);
7141 neon_store_reg(rd, 1, tmp2);
7d1b0095 7142 tcg_temp_free_i32(tmp);
60011498 7143 break;
600b828c 7144 case NEON_2RM_VCVT_F32_F16:
d614a513 7145 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7146 q || (rd & 1)) {
7147 return 1;
7148 }
7d1b0095 7149 tmp3 = tcg_temp_new_i32();
60011498
PB
7150 tmp = neon_load_reg(rm, 0);
7151 tmp2 = neon_load_reg(rm, 1);
7152 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7153 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7154 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7155 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7156 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7157 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7158 tcg_temp_free_i32(tmp);
60011498 7159 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7160 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7161 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7162 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7163 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7164 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7165 tcg_temp_free_i32(tmp2);
7166 tcg_temp_free_i32(tmp3);
60011498 7167 break;
9d935509 7168 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7169 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7170 || ((rm | rd) & 1)) {
7171 return 1;
7172 }
1a66ac61
RH
7173 ptr1 = vfp_reg_ptr(true, rd);
7174 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7175
7176 /* Bit 6 is the lowest opcode bit; it distinguishes between
7177 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7178 */
7179 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7180
7181 if (op == NEON_2RM_AESE) {
1a66ac61 7182 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7183 } else {
1a66ac61 7184 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7185 }
1a66ac61
RH
7186 tcg_temp_free_ptr(ptr1);
7187 tcg_temp_free_ptr(ptr2);
9d935509
AB
7188 tcg_temp_free_i32(tmp3);
7189 break;
f1ecb913 7190 case NEON_2RM_SHA1H:
d614a513 7191 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7192 || ((rm | rd) & 1)) {
7193 return 1;
7194 }
1a66ac61
RH
7195 ptr1 = vfp_reg_ptr(true, rd);
7196 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7197
1a66ac61 7198 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7199
1a66ac61
RH
7200 tcg_temp_free_ptr(ptr1);
7201 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7202 break;
7203 case NEON_2RM_SHA1SU1:
7204 if ((rm | rd) & 1) {
7205 return 1;
7206 }
7207 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7208 if (q) {
d614a513 7209 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7210 return 1;
7211 }
d614a513 7212 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7213 return 1;
7214 }
1a66ac61
RH
7215 ptr1 = vfp_reg_ptr(true, rd);
7216 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7217 if (q) {
1a66ac61 7218 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7219 } else {
1a66ac61 7220 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7221 }
1a66ac61
RH
7222 tcg_temp_free_ptr(ptr1);
7223 tcg_temp_free_ptr(ptr2);
f1ecb913 7224 break;
9ee6e8bb
PB
7225 default:
7226 elementwise:
7227 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7228 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7229 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7230 neon_reg_offset(rm, pass));
f764718d 7231 tmp = NULL;
9ee6e8bb 7232 } else {
dd8fbd78 7233 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7234 }
7235 switch (op) {
600b828c 7236 case NEON_2RM_VREV32:
9ee6e8bb 7237 switch (size) {
dd8fbd78
FN
7238 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7239 case 1: gen_swap_half(tmp); break;
600b828c 7240 default: abort();
9ee6e8bb
PB
7241 }
7242 break;
600b828c 7243 case NEON_2RM_VREV16:
dd8fbd78 7244 gen_rev16(tmp);
9ee6e8bb 7245 break;
600b828c 7246 case NEON_2RM_VCLS:
9ee6e8bb 7247 switch (size) {
dd8fbd78
FN
7248 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7249 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7250 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7251 default: abort();
9ee6e8bb
PB
7252 }
7253 break;
600b828c 7254 case NEON_2RM_VCLZ:
9ee6e8bb 7255 switch (size) {
dd8fbd78
FN
7256 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7257 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7258 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7259 default: abort();
9ee6e8bb
PB
7260 }
7261 break;
600b828c 7262 case NEON_2RM_VCNT:
dd8fbd78 7263 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7264 break;
600b828c 7265 case NEON_2RM_VMVN:
dd8fbd78 7266 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7267 break;
600b828c 7268 case NEON_2RM_VQABS:
9ee6e8bb 7269 switch (size) {
02da0b2d
PM
7270 case 0:
7271 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7272 break;
7273 case 1:
7274 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7275 break;
7276 case 2:
7277 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7278 break;
600b828c 7279 default: abort();
9ee6e8bb
PB
7280 }
7281 break;
600b828c 7282 case NEON_2RM_VQNEG:
9ee6e8bb 7283 switch (size) {
02da0b2d
PM
7284 case 0:
7285 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7286 break;
7287 case 1:
7288 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7289 break;
7290 case 2:
7291 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7292 break;
600b828c 7293 default: abort();
9ee6e8bb
PB
7294 }
7295 break;
600b828c 7296 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7297 tmp2 = tcg_const_i32(0);
9ee6e8bb 7298 switch(size) {
dd8fbd78
FN
7299 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7300 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7301 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7302 default: abort();
9ee6e8bb 7303 }
39d5492a 7304 tcg_temp_free_i32(tmp2);
600b828c 7305 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7306 tcg_gen_not_i32(tmp, tmp);
600b828c 7307 }
9ee6e8bb 7308 break;
600b828c 7309 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7310 tmp2 = tcg_const_i32(0);
9ee6e8bb 7311 switch(size) {
dd8fbd78
FN
7312 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7313 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7314 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7315 default: abort();
9ee6e8bb 7316 }
39d5492a 7317 tcg_temp_free_i32(tmp2);
600b828c 7318 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7319 tcg_gen_not_i32(tmp, tmp);
600b828c 7320 }
9ee6e8bb 7321 break;
600b828c 7322 case NEON_2RM_VCEQ0:
dd8fbd78 7323 tmp2 = tcg_const_i32(0);
9ee6e8bb 7324 switch(size) {
dd8fbd78
FN
7325 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7326 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7327 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7328 default: abort();
9ee6e8bb 7329 }
39d5492a 7330 tcg_temp_free_i32(tmp2);
9ee6e8bb 7331 break;
600b828c 7332 case NEON_2RM_VABS:
9ee6e8bb 7333 switch(size) {
dd8fbd78
FN
7334 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7335 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7336 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7337 default: abort();
9ee6e8bb
PB
7338 }
7339 break;
600b828c 7340 case NEON_2RM_VNEG:
dd8fbd78
FN
7341 tmp2 = tcg_const_i32(0);
7342 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7343 tcg_temp_free_i32(tmp2);
9ee6e8bb 7344 break;
600b828c 7345 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7346 {
7347 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7348 tmp2 = tcg_const_i32(0);
aa47cfdd 7349 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7350 tcg_temp_free_i32(tmp2);
aa47cfdd 7351 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7352 break;
aa47cfdd 7353 }
600b828c 7354 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7355 {
7356 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7357 tmp2 = tcg_const_i32(0);
aa47cfdd 7358 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7359 tcg_temp_free_i32(tmp2);
aa47cfdd 7360 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7361 break;
aa47cfdd 7362 }
600b828c 7363 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7364 {
7365 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7366 tmp2 = tcg_const_i32(0);
aa47cfdd 7367 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7368 tcg_temp_free_i32(tmp2);
aa47cfdd 7369 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7370 break;
aa47cfdd 7371 }
600b828c 7372 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7373 {
7374 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7375 tmp2 = tcg_const_i32(0);
aa47cfdd 7376 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7377 tcg_temp_free_i32(tmp2);
aa47cfdd 7378 tcg_temp_free_ptr(fpstatus);
0e326109 7379 break;
aa47cfdd 7380 }
600b828c 7381 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7382 {
7383 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7384 tmp2 = tcg_const_i32(0);
aa47cfdd 7385 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7386 tcg_temp_free_i32(tmp2);
aa47cfdd 7387 tcg_temp_free_ptr(fpstatus);
0e326109 7388 break;
aa47cfdd 7389 }
600b828c 7390 case NEON_2RM_VABS_F:
4373f3ce 7391 gen_vfp_abs(0);
9ee6e8bb 7392 break;
600b828c 7393 case NEON_2RM_VNEG_F:
4373f3ce 7394 gen_vfp_neg(0);
9ee6e8bb 7395 break;
600b828c 7396 case NEON_2RM_VSWP:
dd8fbd78
FN
7397 tmp2 = neon_load_reg(rd, pass);
7398 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7399 break;
600b828c 7400 case NEON_2RM_VTRN:
dd8fbd78 7401 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7402 switch (size) {
dd8fbd78
FN
7403 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7404 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7405 default: abort();
9ee6e8bb 7406 }
dd8fbd78 7407 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7408 break;
34f7b0a2
WN
7409 case NEON_2RM_VRINTN:
7410 case NEON_2RM_VRINTA:
7411 case NEON_2RM_VRINTM:
7412 case NEON_2RM_VRINTP:
7413 case NEON_2RM_VRINTZ:
7414 {
7415 TCGv_i32 tcg_rmode;
7416 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7417 int rmode;
7418
7419 if (op == NEON_2RM_VRINTZ) {
7420 rmode = FPROUNDING_ZERO;
7421 } else {
7422 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7423 }
7424
7425 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7426 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7427 cpu_env);
7428 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7429 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7430 cpu_env);
7431 tcg_temp_free_ptr(fpstatus);
7432 tcg_temp_free_i32(tcg_rmode);
7433 break;
7434 }
2ce70625
WN
7435 case NEON_2RM_VRINTX:
7436 {
7437 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7438 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7439 tcg_temp_free_ptr(fpstatus);
7440 break;
7441 }
901ad525
WN
7442 case NEON_2RM_VCVTAU:
7443 case NEON_2RM_VCVTAS:
7444 case NEON_2RM_VCVTNU:
7445 case NEON_2RM_VCVTNS:
7446 case NEON_2RM_VCVTPU:
7447 case NEON_2RM_VCVTPS:
7448 case NEON_2RM_VCVTMU:
7449 case NEON_2RM_VCVTMS:
7450 {
7451 bool is_signed = !extract32(insn, 7, 1);
7452 TCGv_ptr fpst = get_fpstatus_ptr(1);
7453 TCGv_i32 tcg_rmode, tcg_shift;
7454 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7455
7456 tcg_shift = tcg_const_i32(0);
7457 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7458 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7459 cpu_env);
7460
7461 if (is_signed) {
7462 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7463 tcg_shift, fpst);
7464 } else {
7465 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7466 tcg_shift, fpst);
7467 }
7468
7469 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7470 cpu_env);
7471 tcg_temp_free_i32(tcg_rmode);
7472 tcg_temp_free_i32(tcg_shift);
7473 tcg_temp_free_ptr(fpst);
7474 break;
7475 }
600b828c 7476 case NEON_2RM_VRECPE:
b6d4443a
AB
7477 {
7478 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7479 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7480 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7481 break;
b6d4443a 7482 }
600b828c 7483 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7484 {
7485 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7486 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7487 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7488 break;
c2fb418e 7489 }
600b828c 7490 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7491 {
7492 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7493 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7494 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7495 break;
b6d4443a 7496 }
600b828c 7497 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7498 {
7499 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7500 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7501 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7502 break;
c2fb418e 7503 }
600b828c 7504 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7505 gen_vfp_sito(0, 1);
9ee6e8bb 7506 break;
600b828c 7507 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7508 gen_vfp_uito(0, 1);
9ee6e8bb 7509 break;
600b828c 7510 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7511 gen_vfp_tosiz(0, 1);
9ee6e8bb 7512 break;
600b828c 7513 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7514 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7515 break;
7516 default:
600b828c
PM
7517 /* Reserved op values were caught by the
7518 * neon_2rm_sizes[] check earlier.
7519 */
7520 abort();
9ee6e8bb 7521 }
600b828c 7522 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7523 tcg_gen_st_f32(cpu_F0s, cpu_env,
7524 neon_reg_offset(rd, pass));
9ee6e8bb 7525 } else {
dd8fbd78 7526 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7527 }
7528 }
7529 break;
7530 }
7531 } else if ((insn & (1 << 10)) == 0) {
7532 /* VTBL, VTBX. */
56907d77
PM
7533 int n = ((insn >> 8) & 3) + 1;
7534 if ((rn + n) > 32) {
7535 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7536 * helper function running off the end of the register file.
7537 */
7538 return 1;
7539 }
7540 n <<= 3;
9ee6e8bb 7541 if (insn & (1 << 6)) {
8f8e3aa4 7542 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7543 } else {
7d1b0095 7544 tmp = tcg_temp_new_i32();
8f8e3aa4 7545 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7546 }
8f8e3aa4 7547 tmp2 = neon_load_reg(rm, 0);
e7c06c4e 7548 ptr1 = vfp_reg_ptr(true, rn);
b75263d6 7549 tmp5 = tcg_const_i32(n);
e7c06c4e 7550 gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
7d1b0095 7551 tcg_temp_free_i32(tmp);
9ee6e8bb 7552 if (insn & (1 << 6)) {
8f8e3aa4 7553 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7554 } else {
7d1b0095 7555 tmp = tcg_temp_new_i32();
8f8e3aa4 7556 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7557 }
8f8e3aa4 7558 tmp3 = neon_load_reg(rm, 1);
e7c06c4e 7559 gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
25aeb69b 7560 tcg_temp_free_i32(tmp5);
e7c06c4e 7561 tcg_temp_free_ptr(ptr1);
8f8e3aa4 7562 neon_store_reg(rd, 0, tmp2);
3018f259 7563 neon_store_reg(rd, 1, tmp3);
7d1b0095 7564 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7565 } else if ((insn & 0x380) == 0) {
7566 /* VDUP */
133da6aa
JR
7567 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7568 return 1;
7569 }
9ee6e8bb 7570 if (insn & (1 << 19)) {
dd8fbd78 7571 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7572 } else {
dd8fbd78 7573 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7574 }
7575 if (insn & (1 << 16)) {
dd8fbd78 7576 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7577 } else if (insn & (1 << 17)) {
7578 if ((insn >> 18) & 1)
dd8fbd78 7579 gen_neon_dup_high16(tmp);
9ee6e8bb 7580 else
dd8fbd78 7581 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7582 }
7583 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7584 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7585 tcg_gen_mov_i32(tmp2, tmp);
7586 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7587 }
7d1b0095 7588 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7589 } else {
7590 return 1;
7591 }
7592 }
7593 }
7594 return 0;
7595}
7596
7dcc1f89 7597static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7598{
4b6a83fb
PM
7599 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7600 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7601
7602 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7603
7604 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7605 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7606 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7607 return 1;
7608 }
d614a513 7609 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7610 return disas_iwmmxt_insn(s, insn);
d614a513 7611 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7612 return disas_dsp_insn(s, insn);
c0f4af17
PM
7613 }
7614 return 1;
4b6a83fb
PM
7615 }
7616
7617 /* Otherwise treat as a generic register access */
7618 is64 = (insn & (1 << 25)) == 0;
7619 if (!is64 && ((insn & (1 << 4)) == 0)) {
7620 /* cdp */
7621 return 1;
7622 }
7623
7624 crm = insn & 0xf;
7625 if (is64) {
7626 crn = 0;
7627 opc1 = (insn >> 4) & 0xf;
7628 opc2 = 0;
7629 rt2 = (insn >> 16) & 0xf;
7630 } else {
7631 crn = (insn >> 16) & 0xf;
7632 opc1 = (insn >> 21) & 7;
7633 opc2 = (insn >> 5) & 7;
7634 rt2 = 0;
7635 }
7636 isread = (insn >> 20) & 1;
7637 rt = (insn >> 12) & 0xf;
7638
60322b39 7639 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7640 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7641 if (ri) {
7642 /* Check access permissions */
dcbff19b 7643 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7644 return 1;
7645 }
7646
c0f4af17 7647 if (ri->accessfn ||
d614a513 7648 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7649 /* Emit code to perform further access permissions checks at
7650 * runtime; this may result in an exception.
c0f4af17
PM
7651 * Note that on XScale all cp0..c13 registers do an access check
7652 * call in order to handle c15_cpar.
f59df3f2
PM
7653 */
7654 TCGv_ptr tmpptr;
3f208fd7 7655 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7656 uint32_t syndrome;
7657
7658 /* Note that since we are an implementation which takes an
7659 * exception on a trapped conditional instruction only if the
7660 * instruction passes its condition code check, we can take
7661 * advantage of the clause in the ARM ARM that allows us to set
7662 * the COND field in the instruction to 0xE in all cases.
7663 * We could fish the actual condition out of the insn (ARM)
7664 * or the condexec bits (Thumb) but it isn't necessary.
7665 */
7666 switch (cpnum) {
7667 case 14:
7668 if (is64) {
7669 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7670 isread, false);
8bcbf37c
PM
7671 } else {
7672 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7673 rt, isread, false);
8bcbf37c
PM
7674 }
7675 break;
7676 case 15:
7677 if (is64) {
7678 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7679 isread, false);
8bcbf37c
PM
7680 } else {
7681 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7682 rt, isread, false);
8bcbf37c
PM
7683 }
7684 break;
7685 default:
7686 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7687 * so this can only happen if this is an ARMv7 or earlier CPU,
7688 * in which case the syndrome information won't actually be
7689 * guest visible.
7690 */
d614a513 7691 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7692 syndrome = syn_uncategorized();
7693 break;
7694 }
7695
43bfa4a1 7696 gen_set_condexec(s);
3977ee5d 7697 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7698 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7699 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7700 tcg_isread = tcg_const_i32(isread);
7701 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7702 tcg_isread);
f59df3f2 7703 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7704 tcg_temp_free_i32(tcg_syn);
3f208fd7 7705 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7706 }
7707
4b6a83fb
PM
7708 /* Handle special cases first */
7709 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7710 case ARM_CP_NOP:
7711 return 0;
7712 case ARM_CP_WFI:
7713 if (isread) {
7714 return 1;
7715 }
eaed129d 7716 gen_set_pc_im(s, s->pc);
dcba3a8d 7717 s->base.is_jmp = DISAS_WFI;
2bee5105 7718 return 0;
4b6a83fb
PM
7719 default:
7720 break;
7721 }
7722
c5a49c63 7723 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7724 gen_io_start();
7725 }
7726
4b6a83fb
PM
7727 if (isread) {
7728 /* Read */
7729 if (is64) {
7730 TCGv_i64 tmp64;
7731 TCGv_i32 tmp;
7732 if (ri->type & ARM_CP_CONST) {
7733 tmp64 = tcg_const_i64(ri->resetvalue);
7734 } else if (ri->readfn) {
7735 TCGv_ptr tmpptr;
4b6a83fb
PM
7736 tmp64 = tcg_temp_new_i64();
7737 tmpptr = tcg_const_ptr(ri);
7738 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7739 tcg_temp_free_ptr(tmpptr);
7740 } else {
7741 tmp64 = tcg_temp_new_i64();
7742 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7743 }
7744 tmp = tcg_temp_new_i32();
ecc7b3aa 7745 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7746 store_reg(s, rt, tmp);
7747 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7748 tmp = tcg_temp_new_i32();
ecc7b3aa 7749 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7750 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7751 store_reg(s, rt2, tmp);
7752 } else {
39d5492a 7753 TCGv_i32 tmp;
4b6a83fb
PM
7754 if (ri->type & ARM_CP_CONST) {
7755 tmp = tcg_const_i32(ri->resetvalue);
7756 } else if (ri->readfn) {
7757 TCGv_ptr tmpptr;
4b6a83fb
PM
7758 tmp = tcg_temp_new_i32();
7759 tmpptr = tcg_const_ptr(ri);
7760 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7761 tcg_temp_free_ptr(tmpptr);
7762 } else {
7763 tmp = load_cpu_offset(ri->fieldoffset);
7764 }
7765 if (rt == 15) {
7766 /* Destination register of r15 for 32 bit loads sets
7767 * the condition codes from the high 4 bits of the value
7768 */
7769 gen_set_nzcv(tmp);
7770 tcg_temp_free_i32(tmp);
7771 } else {
7772 store_reg(s, rt, tmp);
7773 }
7774 }
7775 } else {
7776 /* Write */
7777 if (ri->type & ARM_CP_CONST) {
7778 /* If not forbidden by access permissions, treat as WI */
7779 return 0;
7780 }
7781
7782 if (is64) {
39d5492a 7783 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7784 TCGv_i64 tmp64 = tcg_temp_new_i64();
7785 tmplo = load_reg(s, rt);
7786 tmphi = load_reg(s, rt2);
7787 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7788 tcg_temp_free_i32(tmplo);
7789 tcg_temp_free_i32(tmphi);
7790 if (ri->writefn) {
7791 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7792 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7793 tcg_temp_free_ptr(tmpptr);
7794 } else {
7795 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7796 }
7797 tcg_temp_free_i64(tmp64);
7798 } else {
7799 if (ri->writefn) {
39d5492a 7800 TCGv_i32 tmp;
4b6a83fb 7801 TCGv_ptr tmpptr;
4b6a83fb
PM
7802 tmp = load_reg(s, rt);
7803 tmpptr = tcg_const_ptr(ri);
7804 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7805 tcg_temp_free_ptr(tmpptr);
7806 tcg_temp_free_i32(tmp);
7807 } else {
39d5492a 7808 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7809 store_cpu_offset(tmp, ri->fieldoffset);
7810 }
7811 }
2452731c
PM
7812 }
7813
c5a49c63 7814 if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7815 /* I/O operations must end the TB here (whether read or write) */
7816 gen_io_end();
7817 gen_lookup_tb(s);
7818 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7819 /* We default to ending the TB on a coprocessor register write,
7820 * but allow this to be suppressed by the register definition
7821 * (usually only necessary to work around guest bugs).
7822 */
2452731c 7823 gen_lookup_tb(s);
4b6a83fb 7824 }
2452731c 7825
4b6a83fb
PM
7826 return 0;
7827 }
7828
626187d8
PM
7829 /* Unknown register; this might be a guest error or a QEMU
7830 * unimplemented feature.
7831 */
7832 if (is64) {
7833 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7834 "64 bit system register cp:%d opc1: %d crm:%d "
7835 "(%s)\n",
7836 isread ? "read" : "write", cpnum, opc1, crm,
7837 s->ns ? "non-secure" : "secure");
626187d8
PM
7838 } else {
7839 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7840 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7841 "(%s)\n",
7842 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7843 s->ns ? "non-secure" : "secure");
626187d8
PM
7844 }
7845
4a9a539f 7846 return 1;
9ee6e8bb
PB
7847}
7848
5e3f878a
PB
7849
7850/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7851static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7852{
39d5492a 7853 TCGv_i32 tmp;
7d1b0095 7854 tmp = tcg_temp_new_i32();
ecc7b3aa 7855 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7856 store_reg(s, rlow, tmp);
7d1b0095 7857 tmp = tcg_temp_new_i32();
5e3f878a 7858 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7859 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7860 store_reg(s, rhigh, tmp);
7861}
7862
7863/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7864static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7865{
a7812ae4 7866 TCGv_i64 tmp;
39d5492a 7867 TCGv_i32 tmp2;
5e3f878a 7868
36aa55dc 7869 /* Load value and extend to 64 bits. */
a7812ae4 7870 tmp = tcg_temp_new_i64();
5e3f878a
PB
7871 tmp2 = load_reg(s, rlow);
7872 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7873 tcg_temp_free_i32(tmp2);
5e3f878a 7874 tcg_gen_add_i64(val, val, tmp);
b75263d6 7875 tcg_temp_free_i64(tmp);
5e3f878a
PB
7876}
7877
7878/* load and add a 64-bit value from a register pair. */
a7812ae4 7879static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7880{
a7812ae4 7881 TCGv_i64 tmp;
39d5492a
PM
7882 TCGv_i32 tmpl;
7883 TCGv_i32 tmph;
5e3f878a
PB
7884
7885 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7886 tmpl = load_reg(s, rlow);
7887 tmph = load_reg(s, rhigh);
a7812ae4 7888 tmp = tcg_temp_new_i64();
36aa55dc 7889 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7890 tcg_temp_free_i32(tmpl);
7891 tcg_temp_free_i32(tmph);
5e3f878a 7892 tcg_gen_add_i64(val, val, tmp);
b75263d6 7893 tcg_temp_free_i64(tmp);
5e3f878a
PB
7894}
7895
c9f10124 7896/* Set N and Z flags from hi|lo. */
39d5492a 7897static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7898{
c9f10124
RH
7899 tcg_gen_mov_i32(cpu_NF, hi);
7900 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7901}
7902
426f5abc
PB
7903/* Load/Store exclusive instructions are implemented by remembering
7904 the value/address loaded, and seeing if these are the same
354161b3 7905 when the store is performed. This should be sufficient to implement
426f5abc 7906 the architecturally mandated semantics, and avoids having to monitor
354161b3
EC
7907 regular stores. The compare vs the remembered value is done during
7908 the cmpxchg operation, but we must compare the addresses manually. */
426f5abc 7909static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7910 TCGv_i32 addr, int size)
426f5abc 7911{
94ee24e7 7912 TCGv_i32 tmp = tcg_temp_new_i32();
354161b3 7913 TCGMemOp opc = size | MO_ALIGN | s->be_data;
426f5abc 7914
50225ad0
PM
7915 s->is_ldex = true;
7916
426f5abc 7917 if (size == 3) {
39d5492a 7918 TCGv_i32 tmp2 = tcg_temp_new_i32();
354161b3 7919 TCGv_i64 t64 = tcg_temp_new_i64();
03d05e2d 7920
3448d47b
PM
7921 /* For AArch32, architecturally the 32-bit word at the lowest
7922 * address is always Rt and the one at addr+4 is Rt2, even if
7923 * the CPU is big-endian. That means we don't want to do a
7924 * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
7925 * for an architecturally 64-bit access, but instead do a
7926 * 64-bit access using MO_BE if appropriate and then split
7927 * the two halves.
7928 * This only makes a difference for BE32 user-mode, where
7929 * frob64() must not flip the two halves of the 64-bit data
7930 * but this code must treat BE32 user-mode like BE32 system.
7931 */
7932 TCGv taddr = gen_aa32_addr(s, addr, opc);
7933
7934 tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
7935 tcg_temp_free(taddr);
354161b3 7936 tcg_gen_mov_i64(cpu_exclusive_val, t64);
3448d47b
PM
7937 if (s->be_data == MO_BE) {
7938 tcg_gen_extr_i64_i32(tmp2, tmp, t64);
7939 } else {
7940 tcg_gen_extr_i64_i32(tmp, tmp2, t64);
7941 }
354161b3
EC
7942 tcg_temp_free_i64(t64);
7943
7944 store_reg(s, rt2, tmp2);
03d05e2d 7945 } else {
354161b3 7946 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
03d05e2d 7947 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7948 }
03d05e2d
PM
7949
7950 store_reg(s, rt, tmp);
7951 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7952}
7953
7954static void gen_clrex(DisasContext *s)
7955{
03d05e2d 7956 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7957}
7958
/* Generate code for a store-exclusive (STREX/STREXB/STREXH/STREXD and
 * the STL*X variants).
 *
 * @s: DisasContext
 * @rd: destination register for the success/fail status (0 on success,
 *      1 on failure)
 * @rt: register holding the (low) word to store
 * @rt2: second register for the doubleword case (size == 3); ignored
 *       otherwise (callers pass 15)
 * @addr: virtual address to store to
 * @size: log2 of the access size (0 = byte, 1 = halfword, 2 = word,
 *        3 = doubleword)
 *
 * The exclusive monitor is emulated with a compare-and-swap: the store
 * only succeeds if the memory still contains the value that the earlier
 * load-exclusive recorded in cpu_exclusive_val, and the address matches
 * cpu_exclusive_addr.  Statement order here matters: temps must be
 * freed on every path and the labels bracket exactly the success path.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    /* Exclusives are always aligned accesses in the target's endianness */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();

    /* First check: does the address match the one marked exclusive?
     * Compare in 64 bits since cpu_exclusive_addr is a 64-bit value
     * (and -1 there is the "no exclusive" sentinel, which never matches
     * a zero-extended 32-bit address).
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();  /* will hold the success/fail result */
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Atomically: if [taddr] == cpu_exclusive_val then store n64,
         * o64 gets the old memory value either way.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* t0 = 1 if the cmpxchg saw a stale value (store failed), else 0 */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        /* 8/16/32-bit case: compare against the low bits of the
         * recorded exclusive value.
         */
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    /* Address mismatch: report failure without touching memory */
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now clear */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 8032
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap taken to EL3 (the final argument is the target EL) */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    /* EL0 covers User/System mode; EL2 is Hyp mode */
    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        /* Not a valid mode number */
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    /* addr = banked SP (R13) for the target mode */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Initial offset positions addr at the lower of the two words
     * we store (LR at addr, SPSR at addr + 4), per the addressing mode.
     */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store return state: LR of the current mode ... */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    /* ... and SPSR at the next word up */
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Writeback offset is relative to the address we stored LR at;
         * combined with the initial offset it yields the final SP
         * adjustment of +/-8 (or 0 relative to the untouched SP edge).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* End the TB: the banked-register helpers may have changed CPU state */
    s->base.is_jmp = DISAS_UPDATE;
}
8159
f4df2210 8160static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8161{
f4df2210 8162 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8163 TCGv_i32 tmp;
8164 TCGv_i32 tmp2;
8165 TCGv_i32 tmp3;
8166 TCGv_i32 addr;
a7812ae4 8167 TCGv_i64 tmp64;
9ee6e8bb 8168
e13886e3
PM
8169 /* M variants do not implement ARM mode; this must raise the INVSTATE
8170 * UsageFault exception.
8171 */
b53d8923 8172 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8173 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8174 default_exception_el(s));
8175 return;
b53d8923 8176 }
9ee6e8bb
PB
8177 cond = insn >> 28;
8178 if (cond == 0xf){
be5e7a76
DES
8179 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8180 * choose to UNDEF. In ARMv5 and above the space is used
8181 * for miscellaneous unconditional instructions.
8182 */
8183 ARCH(5);
8184
9ee6e8bb
PB
8185 /* Unconditional instructions. */
8186 if (((insn >> 25) & 7) == 1) {
8187 /* NEON Data processing. */
d614a513 8188 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8189 goto illegal_op;
d614a513 8190 }
9ee6e8bb 8191
7dcc1f89 8192 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8193 goto illegal_op;
7dcc1f89 8194 }
9ee6e8bb
PB
8195 return;
8196 }
8197 if ((insn & 0x0f100000) == 0x04000000) {
8198 /* NEON load/store. */
d614a513 8199 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8200 goto illegal_op;
d614a513 8201 }
9ee6e8bb 8202
7dcc1f89 8203 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8204 goto illegal_op;
7dcc1f89 8205 }
9ee6e8bb
PB
8206 return;
8207 }
6a57f3eb
WN
8208 if ((insn & 0x0f000e10) == 0x0e000a00) {
8209 /* VFP. */
7dcc1f89 8210 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8211 goto illegal_op;
8212 }
8213 return;
8214 }
3d185e5d
PM
8215 if (((insn & 0x0f30f000) == 0x0510f000) ||
8216 ((insn & 0x0f30f010) == 0x0710f000)) {
8217 if ((insn & (1 << 22)) == 0) {
8218 /* PLDW; v7MP */
d614a513 8219 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8220 goto illegal_op;
8221 }
8222 }
8223 /* Otherwise PLD; v5TE+ */
be5e7a76 8224 ARCH(5TE);
3d185e5d
PM
8225 return;
8226 }
8227 if (((insn & 0x0f70f000) == 0x0450f000) ||
8228 ((insn & 0x0f70f010) == 0x0650f000)) {
8229 ARCH(7);
8230 return; /* PLI; V7 */
8231 }
8232 if (((insn & 0x0f700000) == 0x04100000) ||
8233 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8234 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8235 goto illegal_op;
8236 }
8237 return; /* v7MP: Unallocated memory hint: must NOP */
8238 }
8239
8240 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8241 ARCH(6);
8242 /* setend */
9886ecdf
PB
8243 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8244 gen_helper_setend(cpu_env);
dcba3a8d 8245 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8246 }
8247 return;
8248 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8249 switch ((insn >> 4) & 0xf) {
8250 case 1: /* clrex */
8251 ARCH(6K);
426f5abc 8252 gen_clrex(s);
9ee6e8bb
PB
8253 return;
8254 case 4: /* dsb */
8255 case 5: /* dmb */
9ee6e8bb 8256 ARCH(7);
61e4c432 8257 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8258 return;
6df99dec
SS
8259 case 6: /* isb */
8260 /* We need to break the TB after this insn to execute
8261 * self-modifying code correctly and also to take
8262 * any pending interrupts immediately.
8263 */
0b609cc1 8264 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8265 return;
9ee6e8bb
PB
8266 default:
8267 goto illegal_op;
8268 }
8269 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8270 /* srs */
81465888
PM
8271 ARCH(6);
8272 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8273 return;
ea825eee 8274 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8275 /* rfe */
c67b6b71 8276 int32_t offset;
9ee6e8bb
PB
8277 if (IS_USER(s))
8278 goto illegal_op;
8279 ARCH(6);
8280 rn = (insn >> 16) & 0xf;
b0109805 8281 addr = load_reg(s, rn);
9ee6e8bb
PB
8282 i = (insn >> 23) & 3;
8283 switch (i) {
b0109805 8284 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8285 case 1: offset = 0; break; /* IA */
8286 case 2: offset = -8; break; /* DB */
b0109805 8287 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8288 default: abort();
8289 }
8290 if (offset)
b0109805
PB
8291 tcg_gen_addi_i32(addr, addr, offset);
8292 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8293 tmp = tcg_temp_new_i32();
12dcc321 8294 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8295 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8296 tmp2 = tcg_temp_new_i32();
12dcc321 8297 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8298 if (insn & (1 << 21)) {
8299 /* Base writeback. */
8300 switch (i) {
b0109805 8301 case 0: offset = -8; break;
c67b6b71
FN
8302 case 1: offset = 4; break;
8303 case 2: offset = -4; break;
b0109805 8304 case 3: offset = 0; break;
9ee6e8bb
PB
8305 default: abort();
8306 }
8307 if (offset)
b0109805
PB
8308 tcg_gen_addi_i32(addr, addr, offset);
8309 store_reg(s, rn, addr);
8310 } else {
7d1b0095 8311 tcg_temp_free_i32(addr);
9ee6e8bb 8312 }
b0109805 8313 gen_rfe(s, tmp, tmp2);
c67b6b71 8314 return;
9ee6e8bb
PB
8315 } else if ((insn & 0x0e000000) == 0x0a000000) {
8316 /* branch link and change to thumb (blx <offset>) */
8317 int32_t offset;
8318
8319 val = (uint32_t)s->pc;
7d1b0095 8320 tmp = tcg_temp_new_i32();
d9ba4830
PB
8321 tcg_gen_movi_i32(tmp, val);
8322 store_reg(s, 14, tmp);
9ee6e8bb
PB
8323 /* Sign-extend the 24-bit offset */
8324 offset = (((int32_t)insn) << 8) >> 8;
8325 /* offset * 4 + bit24 * 2 + (thumb bit) */
8326 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8327 /* pipeline offset */
8328 val += 4;
be5e7a76 8329 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8330 gen_bx_im(s, val);
9ee6e8bb
PB
8331 return;
8332 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8333 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8334 /* iWMMXt register transfer. */
c0f4af17 8335 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8336 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8337 return;
c0f4af17
PM
8338 }
8339 }
9ee6e8bb
PB
8340 }
8341 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8342 /* Coprocessor double register transfer. */
be5e7a76 8343 ARCH(5TE);
9ee6e8bb
PB
8344 } else if ((insn & 0x0f000010) == 0x0e000010) {
8345 /* Additional coprocessor register transfer. */
7997d92f 8346 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8347 uint32_t mask;
8348 uint32_t val;
8349 /* cps (privileged) */
8350 if (IS_USER(s))
8351 return;
8352 mask = val = 0;
8353 if (insn & (1 << 19)) {
8354 if (insn & (1 << 8))
8355 mask |= CPSR_A;
8356 if (insn & (1 << 7))
8357 mask |= CPSR_I;
8358 if (insn & (1 << 6))
8359 mask |= CPSR_F;
8360 if (insn & (1 << 18))
8361 val |= mask;
8362 }
7997d92f 8363 if (insn & (1 << 17)) {
9ee6e8bb
PB
8364 mask |= CPSR_M;
8365 val |= (insn & 0x1f);
8366 }
8367 if (mask) {
2fbac54b 8368 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8369 }
8370 return;
8371 }
8372 goto illegal_op;
8373 }
8374 if (cond != 0xe) {
8375 /* if not always execute, we generate a conditional jump to
8376 next instruction */
8377 s->condlabel = gen_new_label();
39fb730a 8378 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8379 s->condjmp = 1;
8380 }
8381 if ((insn & 0x0f900000) == 0x03000000) {
8382 if ((insn & (1 << 21)) == 0) {
8383 ARCH(6T2);
8384 rd = (insn >> 12) & 0xf;
8385 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8386 if ((insn & (1 << 22)) == 0) {
8387 /* MOVW */
7d1b0095 8388 tmp = tcg_temp_new_i32();
5e3f878a 8389 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8390 } else {
8391 /* MOVT */
5e3f878a 8392 tmp = load_reg(s, rd);
86831435 8393 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8394 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8395 }
5e3f878a 8396 store_reg(s, rd, tmp);
9ee6e8bb
PB
8397 } else {
8398 if (((insn >> 12) & 0xf) != 0xf)
8399 goto illegal_op;
8400 if (((insn >> 16) & 0xf) == 0) {
8401 gen_nop_hint(s, insn & 0xff);
8402 } else {
8403 /* CPSR = immediate */
8404 val = insn & 0xff;
8405 shift = ((insn >> 8) & 0xf) * 2;
8406 if (shift)
8407 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8408 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8409 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8410 i, val)) {
9ee6e8bb 8411 goto illegal_op;
7dcc1f89 8412 }
9ee6e8bb
PB
8413 }
8414 }
8415 } else if ((insn & 0x0f900000) == 0x01000000
8416 && (insn & 0x00000090) != 0x00000090) {
8417 /* miscellaneous instructions */
8418 op1 = (insn >> 21) & 3;
8419 sh = (insn >> 4) & 0xf;
8420 rm = insn & 0xf;
8421 switch (sh) {
8bfd0550
PM
8422 case 0x0: /* MSR, MRS */
8423 if (insn & (1 << 9)) {
8424 /* MSR (banked) and MRS (banked) */
8425 int sysm = extract32(insn, 16, 4) |
8426 (extract32(insn, 8, 1) << 4);
8427 int r = extract32(insn, 22, 1);
8428
8429 if (op1 & 1) {
8430 /* MSR (banked) */
8431 gen_msr_banked(s, r, sysm, rm);
8432 } else {
8433 /* MRS (banked) */
8434 int rd = extract32(insn, 12, 4);
8435
8436 gen_mrs_banked(s, r, sysm, rd);
8437 }
8438 break;
8439 }
8440
8441 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8442 if (op1 & 1) {
8443 /* PSR = reg */
2fbac54b 8444 tmp = load_reg(s, rm);
9ee6e8bb 8445 i = ((op1 & 2) != 0);
7dcc1f89 8446 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8447 goto illegal_op;
8448 } else {
8449 /* reg = PSR */
8450 rd = (insn >> 12) & 0xf;
8451 if (op1 & 2) {
8452 if (IS_USER(s))
8453 goto illegal_op;
d9ba4830 8454 tmp = load_cpu_field(spsr);
9ee6e8bb 8455 } else {
7d1b0095 8456 tmp = tcg_temp_new_i32();
9ef39277 8457 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8458 }
d9ba4830 8459 store_reg(s, rd, tmp);
9ee6e8bb
PB
8460 }
8461 break;
8462 case 0x1:
8463 if (op1 == 1) {
8464 /* branch/exchange thumb (bx). */
be5e7a76 8465 ARCH(4T);
d9ba4830
PB
8466 tmp = load_reg(s, rm);
8467 gen_bx(s, tmp);
9ee6e8bb
PB
8468 } else if (op1 == 3) {
8469 /* clz */
be5e7a76 8470 ARCH(5);
9ee6e8bb 8471 rd = (insn >> 12) & 0xf;
1497c961 8472 tmp = load_reg(s, rm);
7539a012 8473 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8474 store_reg(s, rd, tmp);
9ee6e8bb
PB
8475 } else {
8476 goto illegal_op;
8477 }
8478 break;
8479 case 0x2:
8480 if (op1 == 1) {
8481 ARCH(5J); /* bxj */
8482 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8483 tmp = load_reg(s, rm);
8484 gen_bx(s, tmp);
9ee6e8bb
PB
8485 } else {
8486 goto illegal_op;
8487 }
8488 break;
8489 case 0x3:
8490 if (op1 != 1)
8491 goto illegal_op;
8492
be5e7a76 8493 ARCH(5);
9ee6e8bb 8494 /* branch link/exchange thumb (blx) */
d9ba4830 8495 tmp = load_reg(s, rm);
7d1b0095 8496 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8497 tcg_gen_movi_i32(tmp2, s->pc);
8498 store_reg(s, 14, tmp2);
8499 gen_bx(s, tmp);
9ee6e8bb 8500 break;
eb0ecd5a
WN
8501 case 0x4:
8502 {
8503 /* crc32/crc32c */
8504 uint32_t c = extract32(insn, 8, 4);
8505
8506 /* Check this CPU supports ARMv8 CRC instructions.
8507 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8508 * Bits 8, 10 and 11 should be zero.
8509 */
d614a513 8510 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8511 (c & 0xd) != 0) {
8512 goto illegal_op;
8513 }
8514
8515 rn = extract32(insn, 16, 4);
8516 rd = extract32(insn, 12, 4);
8517
8518 tmp = load_reg(s, rn);
8519 tmp2 = load_reg(s, rm);
aa633469
PM
8520 if (op1 == 0) {
8521 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8522 } else if (op1 == 1) {
8523 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8524 }
eb0ecd5a
WN
8525 tmp3 = tcg_const_i32(1 << op1);
8526 if (c & 0x2) {
8527 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8528 } else {
8529 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8530 }
8531 tcg_temp_free_i32(tmp2);
8532 tcg_temp_free_i32(tmp3);
8533 store_reg(s, rd, tmp);
8534 break;
8535 }
9ee6e8bb 8536 case 0x5: /* saturating add/subtract */
be5e7a76 8537 ARCH(5TE);
9ee6e8bb
PB
8538 rd = (insn >> 12) & 0xf;
8539 rn = (insn >> 16) & 0xf;
b40d0353 8540 tmp = load_reg(s, rm);
5e3f878a 8541 tmp2 = load_reg(s, rn);
9ee6e8bb 8542 if (op1 & 2)
9ef39277 8543 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8544 if (op1 & 1)
9ef39277 8545 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8546 else
9ef39277 8547 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8548 tcg_temp_free_i32(tmp2);
5e3f878a 8549 store_reg(s, rd, tmp);
9ee6e8bb 8550 break;
49e14940 8551 case 7:
d4a2dc67
PM
8552 {
8553 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8554 switch (op1) {
19a6e31c
PM
8555 case 0:
8556 /* HLT */
8557 gen_hlt(s, imm16);
8558 break;
37e6456e
PM
8559 case 1:
8560 /* bkpt */
8561 ARCH(5);
8562 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8563 syn_aa32_bkpt(imm16, false),
8564 default_exception_el(s));
37e6456e
PM
8565 break;
8566 case 2:
8567 /* Hypervisor call (v7) */
8568 ARCH(7);
8569 if (IS_USER(s)) {
8570 goto illegal_op;
8571 }
8572 gen_hvc(s, imm16);
8573 break;
8574 case 3:
8575 /* Secure monitor call (v6+) */
8576 ARCH(6K);
8577 if (IS_USER(s)) {
8578 goto illegal_op;
8579 }
8580 gen_smc(s);
8581 break;
8582 default:
19a6e31c 8583 g_assert_not_reached();
49e14940 8584 }
9ee6e8bb 8585 break;
d4a2dc67 8586 }
9ee6e8bb
PB
8587 case 0x8: /* signed multiply */
8588 case 0xa:
8589 case 0xc:
8590 case 0xe:
be5e7a76 8591 ARCH(5TE);
9ee6e8bb
PB
8592 rs = (insn >> 8) & 0xf;
8593 rn = (insn >> 12) & 0xf;
8594 rd = (insn >> 16) & 0xf;
8595 if (op1 == 1) {
8596 /* (32 * 16) >> 16 */
5e3f878a
PB
8597 tmp = load_reg(s, rm);
8598 tmp2 = load_reg(s, rs);
9ee6e8bb 8599 if (sh & 4)
5e3f878a 8600 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8601 else
5e3f878a 8602 gen_sxth(tmp2);
a7812ae4
PB
8603 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8604 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8605 tmp = tcg_temp_new_i32();
ecc7b3aa 8606 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8607 tcg_temp_free_i64(tmp64);
9ee6e8bb 8608 if ((sh & 2) == 0) {
5e3f878a 8609 tmp2 = load_reg(s, rn);
9ef39277 8610 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8611 tcg_temp_free_i32(tmp2);
9ee6e8bb 8612 }
5e3f878a 8613 store_reg(s, rd, tmp);
9ee6e8bb
PB
8614 } else {
8615 /* 16 * 16 */
5e3f878a
PB
8616 tmp = load_reg(s, rm);
8617 tmp2 = load_reg(s, rs);
8618 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8619 tcg_temp_free_i32(tmp2);
9ee6e8bb 8620 if (op1 == 2) {
a7812ae4
PB
8621 tmp64 = tcg_temp_new_i64();
8622 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8623 tcg_temp_free_i32(tmp);
a7812ae4
PB
8624 gen_addq(s, tmp64, rn, rd);
8625 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8626 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8627 } else {
8628 if (op1 == 0) {
5e3f878a 8629 tmp2 = load_reg(s, rn);
9ef39277 8630 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8631 tcg_temp_free_i32(tmp2);
9ee6e8bb 8632 }
5e3f878a 8633 store_reg(s, rd, tmp);
9ee6e8bb
PB
8634 }
8635 }
8636 break;
8637 default:
8638 goto illegal_op;
8639 }
8640 } else if (((insn & 0x0e000000) == 0 &&
8641 (insn & 0x00000090) != 0x90) ||
8642 ((insn & 0x0e000000) == (1 << 25))) {
8643 int set_cc, logic_cc, shiftop;
8644
8645 op1 = (insn >> 21) & 0xf;
8646 set_cc = (insn >> 20) & 1;
8647 logic_cc = table_logic_cc[op1] & set_cc;
8648
8649 /* data processing instruction */
8650 if (insn & (1 << 25)) {
8651 /* immediate operand */
8652 val = insn & 0xff;
8653 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8654 if (shift) {
9ee6e8bb 8655 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8656 }
7d1b0095 8657 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8658 tcg_gen_movi_i32(tmp2, val);
8659 if (logic_cc && shift) {
8660 gen_set_CF_bit31(tmp2);
8661 }
9ee6e8bb
PB
8662 } else {
8663 /* register */
8664 rm = (insn) & 0xf;
e9bb4aa9 8665 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8666 shiftop = (insn >> 5) & 3;
8667 if (!(insn & (1 << 4))) {
8668 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8669 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8670 } else {
8671 rs = (insn >> 8) & 0xf;
8984bd2e 8672 tmp = load_reg(s, rs);
e9bb4aa9 8673 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8674 }
8675 }
8676 if (op1 != 0x0f && op1 != 0x0d) {
8677 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8678 tmp = load_reg(s, rn);
8679 } else {
f764718d 8680 tmp = NULL;
9ee6e8bb
PB
8681 }
8682 rd = (insn >> 12) & 0xf;
8683 switch(op1) {
8684 case 0x00:
e9bb4aa9
JR
8685 tcg_gen_and_i32(tmp, tmp, tmp2);
8686 if (logic_cc) {
8687 gen_logic_CC(tmp);
8688 }
7dcc1f89 8689 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8690 break;
8691 case 0x01:
e9bb4aa9
JR
8692 tcg_gen_xor_i32(tmp, tmp, tmp2);
8693 if (logic_cc) {
8694 gen_logic_CC(tmp);
8695 }
7dcc1f89 8696 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8697 break;
8698 case 0x02:
8699 if (set_cc && rd == 15) {
8700 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8701 if (IS_USER(s)) {
9ee6e8bb 8702 goto illegal_op;
e9bb4aa9 8703 }
72485ec4 8704 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8705 gen_exception_return(s, tmp);
9ee6e8bb 8706 } else {
e9bb4aa9 8707 if (set_cc) {
72485ec4 8708 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8709 } else {
8710 tcg_gen_sub_i32(tmp, tmp, tmp2);
8711 }
7dcc1f89 8712 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8713 }
8714 break;
8715 case 0x03:
e9bb4aa9 8716 if (set_cc) {
72485ec4 8717 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8718 } else {
8719 tcg_gen_sub_i32(tmp, tmp2, tmp);
8720 }
7dcc1f89 8721 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8722 break;
8723 case 0x04:
e9bb4aa9 8724 if (set_cc) {
72485ec4 8725 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8726 } else {
8727 tcg_gen_add_i32(tmp, tmp, tmp2);
8728 }
7dcc1f89 8729 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8730 break;
8731 case 0x05:
e9bb4aa9 8732 if (set_cc) {
49b4c31e 8733 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8734 } else {
8735 gen_add_carry(tmp, tmp, tmp2);
8736 }
7dcc1f89 8737 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8738 break;
8739 case 0x06:
e9bb4aa9 8740 if (set_cc) {
2de68a49 8741 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8742 } else {
8743 gen_sub_carry(tmp, tmp, tmp2);
8744 }
7dcc1f89 8745 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8746 break;
8747 case 0x07:
e9bb4aa9 8748 if (set_cc) {
2de68a49 8749 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8750 } else {
8751 gen_sub_carry(tmp, tmp2, tmp);
8752 }
7dcc1f89 8753 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8754 break;
8755 case 0x08:
8756 if (set_cc) {
e9bb4aa9
JR
8757 tcg_gen_and_i32(tmp, tmp, tmp2);
8758 gen_logic_CC(tmp);
9ee6e8bb 8759 }
7d1b0095 8760 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8761 break;
8762 case 0x09:
8763 if (set_cc) {
e9bb4aa9
JR
8764 tcg_gen_xor_i32(tmp, tmp, tmp2);
8765 gen_logic_CC(tmp);
9ee6e8bb 8766 }
7d1b0095 8767 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8768 break;
8769 case 0x0a:
8770 if (set_cc) {
72485ec4 8771 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8772 }
7d1b0095 8773 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8774 break;
8775 case 0x0b:
8776 if (set_cc) {
72485ec4 8777 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8778 }
7d1b0095 8779 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8780 break;
8781 case 0x0c:
e9bb4aa9
JR
8782 tcg_gen_or_i32(tmp, tmp, tmp2);
8783 if (logic_cc) {
8784 gen_logic_CC(tmp);
8785 }
7dcc1f89 8786 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8787 break;
8788 case 0x0d:
8789 if (logic_cc && rd == 15) {
8790 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8791 if (IS_USER(s)) {
9ee6e8bb 8792 goto illegal_op;
e9bb4aa9
JR
8793 }
8794 gen_exception_return(s, tmp2);
9ee6e8bb 8795 } else {
e9bb4aa9
JR
8796 if (logic_cc) {
8797 gen_logic_CC(tmp2);
8798 }
7dcc1f89 8799 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8800 }
8801 break;
8802 case 0x0e:
f669df27 8803 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8804 if (logic_cc) {
8805 gen_logic_CC(tmp);
8806 }
7dcc1f89 8807 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8808 break;
8809 default:
8810 case 0x0f:
e9bb4aa9
JR
8811 tcg_gen_not_i32(tmp2, tmp2);
8812 if (logic_cc) {
8813 gen_logic_CC(tmp2);
8814 }
7dcc1f89 8815 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8816 break;
8817 }
e9bb4aa9 8818 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8819 tcg_temp_free_i32(tmp2);
e9bb4aa9 8820 }
9ee6e8bb
PB
8821 } else {
8822 /* other instructions */
8823 op1 = (insn >> 24) & 0xf;
8824 switch(op1) {
8825 case 0x0:
8826 case 0x1:
8827 /* multiplies, extra load/stores */
8828 sh = (insn >> 5) & 3;
8829 if (sh == 0) {
8830 if (op1 == 0x0) {
8831 rd = (insn >> 16) & 0xf;
8832 rn = (insn >> 12) & 0xf;
8833 rs = (insn >> 8) & 0xf;
8834 rm = (insn) & 0xf;
8835 op1 = (insn >> 20) & 0xf;
8836 switch (op1) {
8837 case 0: case 1: case 2: case 3: case 6:
8838 /* 32 bit mul */
5e3f878a
PB
8839 tmp = load_reg(s, rs);
8840 tmp2 = load_reg(s, rm);
8841 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8842 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8843 if (insn & (1 << 22)) {
8844 /* Subtract (mls) */
8845 ARCH(6T2);
5e3f878a
PB
8846 tmp2 = load_reg(s, rn);
8847 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8848 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8849 } else if (insn & (1 << 21)) {
8850 /* Add */
5e3f878a
PB
8851 tmp2 = load_reg(s, rn);
8852 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8853 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8854 }
8855 if (insn & (1 << 20))
5e3f878a
PB
8856 gen_logic_CC(tmp);
8857 store_reg(s, rd, tmp);
9ee6e8bb 8858 break;
8aac08b1
AJ
8859 case 4:
8860 /* 64 bit mul double accumulate (UMAAL) */
8861 ARCH(6);
8862 tmp = load_reg(s, rs);
8863 tmp2 = load_reg(s, rm);
8864 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8865 gen_addq_lo(s, tmp64, rn);
8866 gen_addq_lo(s, tmp64, rd);
8867 gen_storeq_reg(s, rn, rd, tmp64);
8868 tcg_temp_free_i64(tmp64);
8869 break;
8870 case 8: case 9: case 10: case 11:
8871 case 12: case 13: case 14: case 15:
8872 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8873 tmp = load_reg(s, rs);
8874 tmp2 = load_reg(s, rm);
8aac08b1 8875 if (insn & (1 << 22)) {
c9f10124 8876 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8877 } else {
c9f10124 8878 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8879 }
8880 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8881 TCGv_i32 al = load_reg(s, rn);
8882 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8883 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8884 tcg_temp_free_i32(al);
8885 tcg_temp_free_i32(ah);
9ee6e8bb 8886 }
8aac08b1 8887 if (insn & (1 << 20)) {
c9f10124 8888 gen_logicq_cc(tmp, tmp2);
8aac08b1 8889 }
c9f10124
RH
8890 store_reg(s, rn, tmp);
8891 store_reg(s, rd, tmp2);
9ee6e8bb 8892 break;
8aac08b1
AJ
8893 default:
8894 goto illegal_op;
9ee6e8bb
PB
8895 }
8896 } else {
8897 rn = (insn >> 16) & 0xf;
8898 rd = (insn >> 12) & 0xf;
8899 if (insn & (1 << 23)) {
8900 /* load/store exclusive */
2359bf80 8901 int op2 = (insn >> 8) & 3;
86753403 8902 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8903
8904 switch (op2) {
8905 case 0: /* lda/stl */
8906 if (op1 == 1) {
8907 goto illegal_op;
8908 }
8909 ARCH(8);
8910 break;
8911 case 1: /* reserved */
8912 goto illegal_op;
8913 case 2: /* ldaex/stlex */
8914 ARCH(8);
8915 break;
8916 case 3: /* ldrex/strex */
8917 if (op1) {
8918 ARCH(6K);
8919 } else {
8920 ARCH(6);
8921 }
8922 break;
8923 }
8924
3174f8e9 8925 addr = tcg_temp_local_new_i32();
98a46317 8926 load_reg_var(s, addr, rn);
2359bf80
MR
8927
8928 /* Since the emulation does not have barriers,
8929 the acquire/release semantics need no special
8930 handling */
8931 if (op2 == 0) {
8932 if (insn & (1 << 20)) {
8933 tmp = tcg_temp_new_i32();
8934 switch (op1) {
8935 case 0: /* lda */
9bb6558a
PM
8936 gen_aa32_ld32u_iss(s, tmp, addr,
8937 get_mem_index(s),
8938 rd | ISSIsAcqRel);
2359bf80
MR
8939 break;
8940 case 2: /* ldab */
9bb6558a
PM
8941 gen_aa32_ld8u_iss(s, tmp, addr,
8942 get_mem_index(s),
8943 rd | ISSIsAcqRel);
2359bf80
MR
8944 break;
8945 case 3: /* ldah */
9bb6558a
PM
8946 gen_aa32_ld16u_iss(s, tmp, addr,
8947 get_mem_index(s),
8948 rd | ISSIsAcqRel);
2359bf80
MR
8949 break;
8950 default:
8951 abort();
8952 }
8953 store_reg(s, rd, tmp);
8954 } else {
8955 rm = insn & 0xf;
8956 tmp = load_reg(s, rm);
8957 switch (op1) {
8958 case 0: /* stl */
9bb6558a
PM
8959 gen_aa32_st32_iss(s, tmp, addr,
8960 get_mem_index(s),
8961 rm | ISSIsAcqRel);
2359bf80
MR
8962 break;
8963 case 2: /* stlb */
9bb6558a
PM
8964 gen_aa32_st8_iss(s, tmp, addr,
8965 get_mem_index(s),
8966 rm | ISSIsAcqRel);
2359bf80
MR
8967 break;
8968 case 3: /* stlh */
9bb6558a
PM
8969 gen_aa32_st16_iss(s, tmp, addr,
8970 get_mem_index(s),
8971 rm | ISSIsAcqRel);
2359bf80
MR
8972 break;
8973 default:
8974 abort();
8975 }
8976 tcg_temp_free_i32(tmp);
8977 }
8978 } else if (insn & (1 << 20)) {
86753403
PB
8979 switch (op1) {
8980 case 0: /* ldrex */
426f5abc 8981 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8982 break;
8983 case 1: /* ldrexd */
426f5abc 8984 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8985 break;
8986 case 2: /* ldrexb */
426f5abc 8987 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8988 break;
8989 case 3: /* ldrexh */
426f5abc 8990 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8991 break;
8992 default:
8993 abort();
8994 }
9ee6e8bb
PB
8995 } else {
8996 rm = insn & 0xf;
86753403
PB
8997 switch (op1) {
8998 case 0: /* strex */
426f5abc 8999 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
9000 break;
9001 case 1: /* strexd */
502e64fe 9002 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9003 break;
9004 case 2: /* strexb */
426f5abc 9005 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9006 break;
9007 case 3: /* strexh */
426f5abc 9008 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9009 break;
9010 default:
9011 abort();
9012 }
9ee6e8bb 9013 }
39d5492a 9014 tcg_temp_free_i32(addr);
9ee6e8bb 9015 } else {
cf12bce0
EC
9016 TCGv taddr;
9017 TCGMemOp opc = s->be_data;
9018
9ee6e8bb
PB
9019 /* SWP instruction */
9020 rm = (insn) & 0xf;
9021
9ee6e8bb 9022 if (insn & (1 << 22)) {
cf12bce0 9023 opc |= MO_UB;
9ee6e8bb 9024 } else {
cf12bce0 9025 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9026 }
cf12bce0
EC
9027
9028 addr = load_reg(s, rn);
9029 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9030 tcg_temp_free_i32(addr);
cf12bce0
EC
9031
9032 tmp = load_reg(s, rm);
9033 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9034 get_mem_index(s), opc);
9035 tcg_temp_free(taddr);
9036 store_reg(s, rd, tmp);
9ee6e8bb
PB
9037 }
9038 }
9039 } else {
9040 int address_offset;
3960c336 9041 bool load = insn & (1 << 20);
63f26fcf
PM
9042 bool wbit = insn & (1 << 21);
9043 bool pbit = insn & (1 << 24);
3960c336 9044 bool doubleword = false;
9bb6558a
PM
9045 ISSInfo issinfo;
9046
9ee6e8bb
PB
9047 /* Misc load/store */
9048 rn = (insn >> 16) & 0xf;
9049 rd = (insn >> 12) & 0xf;
3960c336 9050
9bb6558a
PM
9051 /* ISS not valid if writeback */
9052 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9053
3960c336
PM
9054 if (!load && (sh & 2)) {
9055 /* doubleword */
9056 ARCH(5TE);
9057 if (rd & 1) {
9058 /* UNPREDICTABLE; we choose to UNDEF */
9059 goto illegal_op;
9060 }
9061 load = (sh & 1) == 0;
9062 doubleword = true;
9063 }
9064
b0109805 9065 addr = load_reg(s, rn);
63f26fcf 9066 if (pbit) {
b0109805 9067 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9068 }
9ee6e8bb 9069 address_offset = 0;
3960c336
PM
9070
9071 if (doubleword) {
9072 if (!load) {
9ee6e8bb 9073 /* store */
b0109805 9074 tmp = load_reg(s, rd);
12dcc321 9075 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9076 tcg_temp_free_i32(tmp);
b0109805
PB
9077 tcg_gen_addi_i32(addr, addr, 4);
9078 tmp = load_reg(s, rd + 1);
12dcc321 9079 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9080 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9081 } else {
9082 /* load */
5a839c0d 9083 tmp = tcg_temp_new_i32();
12dcc321 9084 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9085 store_reg(s, rd, tmp);
9086 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9087 tmp = tcg_temp_new_i32();
12dcc321 9088 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9089 rd++;
9ee6e8bb
PB
9090 }
9091 address_offset = -4;
3960c336
PM
9092 } else if (load) {
9093 /* load */
9094 tmp = tcg_temp_new_i32();
9095 switch (sh) {
9096 case 1:
9bb6558a
PM
9097 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9098 issinfo);
3960c336
PM
9099 break;
9100 case 2:
9bb6558a
PM
9101 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9102 issinfo);
3960c336
PM
9103 break;
9104 default:
9105 case 3:
9bb6558a
PM
9106 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9107 issinfo);
3960c336
PM
9108 break;
9109 }
9ee6e8bb
PB
9110 } else {
9111 /* store */
b0109805 9112 tmp = load_reg(s, rd);
9bb6558a 9113 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9114 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9115 }
9116 /* Perform base writeback before the loaded value to
9117 ensure correct behavior with overlapping index registers.
b6af0975 9118 ldrd with base writeback is undefined if the
9ee6e8bb 9119 destination and index registers overlap. */
63f26fcf 9120 if (!pbit) {
b0109805
PB
9121 gen_add_datah_offset(s, insn, address_offset, addr);
9122 store_reg(s, rn, addr);
63f26fcf 9123 } else if (wbit) {
9ee6e8bb 9124 if (address_offset)
b0109805
PB
9125 tcg_gen_addi_i32(addr, addr, address_offset);
9126 store_reg(s, rn, addr);
9127 } else {
7d1b0095 9128 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9129 }
9130 if (load) {
9131 /* Complete the load. */
b0109805 9132 store_reg(s, rd, tmp);
9ee6e8bb
PB
9133 }
9134 }
9135 break;
9136 case 0x4:
9137 case 0x5:
9138 goto do_ldst;
9139 case 0x6:
9140 case 0x7:
9141 if (insn & (1 << 4)) {
9142 ARCH(6);
9143 /* Armv6 Media instructions. */
9144 rm = insn & 0xf;
9145 rn = (insn >> 16) & 0xf;
2c0262af 9146 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9147 rs = (insn >> 8) & 0xf;
9148 switch ((insn >> 23) & 3) {
9149 case 0: /* Parallel add/subtract. */
9150 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9151 tmp = load_reg(s, rn);
9152 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9153 sh = (insn >> 5) & 7;
9154 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9155 goto illegal_op;
6ddbc6e4 9156 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9157 tcg_temp_free_i32(tmp2);
6ddbc6e4 9158 store_reg(s, rd, tmp);
9ee6e8bb
PB
9159 break;
9160 case 1:
9161 if ((insn & 0x00700020) == 0) {
6c95676b 9162 /* Halfword pack. */
3670669c
PB
9163 tmp = load_reg(s, rn);
9164 tmp2 = load_reg(s, rm);
9ee6e8bb 9165 shift = (insn >> 7) & 0x1f;
3670669c
PB
9166 if (insn & (1 << 6)) {
9167 /* pkhtb */
22478e79
AZ
9168 if (shift == 0)
9169 shift = 31;
9170 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9171 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9172 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9173 } else {
9174 /* pkhbt */
22478e79
AZ
9175 if (shift)
9176 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9177 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9178 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9179 }
9180 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9181 tcg_temp_free_i32(tmp2);
3670669c 9182 store_reg(s, rd, tmp);
9ee6e8bb
PB
9183 } else if ((insn & 0x00200020) == 0x00200000) {
9184 /* [us]sat */
6ddbc6e4 9185 tmp = load_reg(s, rm);
9ee6e8bb
PB
9186 shift = (insn >> 7) & 0x1f;
9187 if (insn & (1 << 6)) {
9188 if (shift == 0)
9189 shift = 31;
6ddbc6e4 9190 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9191 } else {
6ddbc6e4 9192 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9193 }
9194 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9195 tmp2 = tcg_const_i32(sh);
9196 if (insn & (1 << 22))
9ef39277 9197 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9198 else
9ef39277 9199 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9200 tcg_temp_free_i32(tmp2);
6ddbc6e4 9201 store_reg(s, rd, tmp);
9ee6e8bb
PB
9202 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9203 /* [us]sat16 */
6ddbc6e4 9204 tmp = load_reg(s, rm);
9ee6e8bb 9205 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9206 tmp2 = tcg_const_i32(sh);
9207 if (insn & (1 << 22))
9ef39277 9208 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9209 else
9ef39277 9210 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9211 tcg_temp_free_i32(tmp2);
6ddbc6e4 9212 store_reg(s, rd, tmp);
9ee6e8bb
PB
9213 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9214 /* Select bytes. */
6ddbc6e4
PB
9215 tmp = load_reg(s, rn);
9216 tmp2 = load_reg(s, rm);
7d1b0095 9217 tmp3 = tcg_temp_new_i32();
0ecb72a5 9218 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9219 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9220 tcg_temp_free_i32(tmp3);
9221 tcg_temp_free_i32(tmp2);
6ddbc6e4 9222 store_reg(s, rd, tmp);
9ee6e8bb 9223 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9224 tmp = load_reg(s, rm);
9ee6e8bb 9225 shift = (insn >> 10) & 3;
1301f322 9226 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9227 rotate, a shift is sufficient. */
9228 if (shift != 0)
f669df27 9229 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9230 op1 = (insn >> 20) & 7;
9231 switch (op1) {
5e3f878a
PB
9232 case 0: gen_sxtb16(tmp); break;
9233 case 2: gen_sxtb(tmp); break;
9234 case 3: gen_sxth(tmp); break;
9235 case 4: gen_uxtb16(tmp); break;
9236 case 6: gen_uxtb(tmp); break;
9237 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9238 default: goto illegal_op;
9239 }
9240 if (rn != 15) {
5e3f878a 9241 tmp2 = load_reg(s, rn);
9ee6e8bb 9242 if ((op1 & 3) == 0) {
5e3f878a 9243 gen_add16(tmp, tmp2);
9ee6e8bb 9244 } else {
5e3f878a 9245 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9246 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9247 }
9248 }
6c95676b 9249 store_reg(s, rd, tmp);
9ee6e8bb
PB
9250 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9251 /* rev */
b0109805 9252 tmp = load_reg(s, rm);
9ee6e8bb
PB
9253 if (insn & (1 << 22)) {
9254 if (insn & (1 << 7)) {
b0109805 9255 gen_revsh(tmp);
9ee6e8bb
PB
9256 } else {
9257 ARCH(6T2);
b0109805 9258 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9259 }
9260 } else {
9261 if (insn & (1 << 7))
b0109805 9262 gen_rev16(tmp);
9ee6e8bb 9263 else
66896cb8 9264 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9265 }
b0109805 9266 store_reg(s, rd, tmp);
9ee6e8bb
PB
9267 } else {
9268 goto illegal_op;
9269 }
9270 break;
9271 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9272 switch ((insn >> 20) & 0x7) {
9273 case 5:
9274 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9275 /* op2 not 00x or 11x : UNDEF */
9276 goto illegal_op;
9277 }
838fa72d
AJ
9278 /* Signed multiply most significant [accumulate].
9279 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9280 tmp = load_reg(s, rm);
9281 tmp2 = load_reg(s, rs);
a7812ae4 9282 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9283
955a7dd5 9284 if (rd != 15) {
838fa72d 9285 tmp = load_reg(s, rd);
9ee6e8bb 9286 if (insn & (1 << 6)) {
838fa72d 9287 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9288 } else {
838fa72d 9289 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9290 }
9291 }
838fa72d
AJ
9292 if (insn & (1 << 5)) {
9293 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9294 }
9295 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9296 tmp = tcg_temp_new_i32();
ecc7b3aa 9297 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9298 tcg_temp_free_i64(tmp64);
955a7dd5 9299 store_reg(s, rn, tmp);
41e9564d
PM
9300 break;
9301 case 0:
9302 case 4:
9303 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9304 if (insn & (1 << 7)) {
9305 goto illegal_op;
9306 }
9307 tmp = load_reg(s, rm);
9308 tmp2 = load_reg(s, rs);
9ee6e8bb 9309 if (insn & (1 << 5))
5e3f878a
PB
9310 gen_swap_half(tmp2);
9311 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9312 if (insn & (1 << 22)) {
5e3f878a 9313 /* smlald, smlsld */
33bbd75a
PC
9314 TCGv_i64 tmp64_2;
9315
a7812ae4 9316 tmp64 = tcg_temp_new_i64();
33bbd75a 9317 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9318 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9319 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9320 tcg_temp_free_i32(tmp);
33bbd75a
PC
9321 tcg_temp_free_i32(tmp2);
9322 if (insn & (1 << 6)) {
9323 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9324 } else {
9325 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9326 }
9327 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9328 gen_addq(s, tmp64, rd, rn);
9329 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9330 tcg_temp_free_i64(tmp64);
9ee6e8bb 9331 } else {
5e3f878a 9332 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9333 if (insn & (1 << 6)) {
9334 /* This subtraction cannot overflow. */
9335 tcg_gen_sub_i32(tmp, tmp, tmp2);
9336 } else {
9337 /* This addition cannot overflow 32 bits;
9338 * however it may overflow considered as a
9339 * signed operation, in which case we must set
9340 * the Q flag.
9341 */
9342 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9343 }
9344 tcg_temp_free_i32(tmp2);
22478e79 9345 if (rd != 15)
9ee6e8bb 9346 {
22478e79 9347 tmp2 = load_reg(s, rd);
9ef39277 9348 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9349 tcg_temp_free_i32(tmp2);
9ee6e8bb 9350 }
22478e79 9351 store_reg(s, rn, tmp);
9ee6e8bb 9352 }
41e9564d 9353 break;
b8b8ea05
PM
9354 case 1:
9355 case 3:
9356 /* SDIV, UDIV */
d614a513 9357 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9358 goto illegal_op;
9359 }
9360 if (((insn >> 5) & 7) || (rd != 15)) {
9361 goto illegal_op;
9362 }
9363 tmp = load_reg(s, rm);
9364 tmp2 = load_reg(s, rs);
9365 if (insn & (1 << 21)) {
9366 gen_helper_udiv(tmp, tmp, tmp2);
9367 } else {
9368 gen_helper_sdiv(tmp, tmp, tmp2);
9369 }
9370 tcg_temp_free_i32(tmp2);
9371 store_reg(s, rn, tmp);
9372 break;
41e9564d
PM
9373 default:
9374 goto illegal_op;
9ee6e8bb
PB
9375 }
9376 break;
9377 case 3:
9378 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9379 switch (op1) {
9380 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9381 ARCH(6);
9382 tmp = load_reg(s, rm);
9383 tmp2 = load_reg(s, rs);
9384 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9385 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9386 if (rd != 15) {
9387 tmp2 = load_reg(s, rd);
6ddbc6e4 9388 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9389 tcg_temp_free_i32(tmp2);
9ee6e8bb 9390 }
ded9d295 9391 store_reg(s, rn, tmp);
9ee6e8bb
PB
9392 break;
9393 case 0x20: case 0x24: case 0x28: case 0x2c:
9394 /* Bitfield insert/clear. */
9395 ARCH(6T2);
9396 shift = (insn >> 7) & 0x1f;
9397 i = (insn >> 16) & 0x1f;
45140a57
KB
9398 if (i < shift) {
9399 /* UNPREDICTABLE; we choose to UNDEF */
9400 goto illegal_op;
9401 }
9ee6e8bb
PB
9402 i = i + 1 - shift;
9403 if (rm == 15) {
7d1b0095 9404 tmp = tcg_temp_new_i32();
5e3f878a 9405 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9406 } else {
5e3f878a 9407 tmp = load_reg(s, rm);
9ee6e8bb
PB
9408 }
9409 if (i != 32) {
5e3f878a 9410 tmp2 = load_reg(s, rd);
d593c48e 9411 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9412 tcg_temp_free_i32(tmp2);
9ee6e8bb 9413 }
5e3f878a 9414 store_reg(s, rd, tmp);
9ee6e8bb
PB
9415 break;
9416 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9417 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9418 ARCH(6T2);
5e3f878a 9419 tmp = load_reg(s, rm);
9ee6e8bb
PB
9420 shift = (insn >> 7) & 0x1f;
9421 i = ((insn >> 16) & 0x1f) + 1;
9422 if (shift + i > 32)
9423 goto illegal_op;
9424 if (i < 32) {
9425 if (op1 & 0x20) {
59a71b4c 9426 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9427 } else {
59a71b4c 9428 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9429 }
9430 }
5e3f878a 9431 store_reg(s, rd, tmp);
9ee6e8bb
PB
9432 break;
9433 default:
9434 goto illegal_op;
9435 }
9436 break;
9437 }
9438 break;
9439 }
9440 do_ldst:
9441 /* Check for undefined extension instructions
9442 * per the ARM Bible IE:
9443 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9444 */
9445 sh = (0xf << 20) | (0xf << 4);
9446 if (op1 == 0x7 && ((insn & sh) == sh))
9447 {
9448 goto illegal_op;
9449 }
9450 /* load/store byte/word */
9451 rn = (insn >> 16) & 0xf;
9452 rd = (insn >> 12) & 0xf;
b0109805 9453 tmp2 = load_reg(s, rn);
a99caa48
PM
9454 if ((insn & 0x01200000) == 0x00200000) {
9455 /* ldrt/strt */
579d21cc 9456 i = get_a32_user_mem_index(s);
a99caa48
PM
9457 } else {
9458 i = get_mem_index(s);
9459 }
9ee6e8bb 9460 if (insn & (1 << 24))
b0109805 9461 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9462 if (insn & (1 << 20)) {
9463 /* load */
5a839c0d 9464 tmp = tcg_temp_new_i32();
9ee6e8bb 9465 if (insn & (1 << 22)) {
9bb6558a 9466 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9467 } else {
9bb6558a 9468 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9469 }
9ee6e8bb
PB
9470 } else {
9471 /* store */
b0109805 9472 tmp = load_reg(s, rd);
5a839c0d 9473 if (insn & (1 << 22)) {
9bb6558a 9474 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9475 } else {
9bb6558a 9476 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9477 }
9478 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9479 }
9480 if (!(insn & (1 << 24))) {
b0109805
PB
9481 gen_add_data_offset(s, insn, tmp2);
9482 store_reg(s, rn, tmp2);
9483 } else if (insn & (1 << 21)) {
9484 store_reg(s, rn, tmp2);
9485 } else {
7d1b0095 9486 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9487 }
9488 if (insn & (1 << 20)) {
9489 /* Complete the load. */
7dcc1f89 9490 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9491 }
9492 break;
9493 case 0x08:
9494 case 0x09:
9495 {
da3e53dd
PM
9496 int j, n, loaded_base;
9497 bool exc_return = false;
9498 bool is_load = extract32(insn, 20, 1);
9499 bool user = false;
39d5492a 9500 TCGv_i32 loaded_var;
9ee6e8bb
PB
9501 /* load/store multiple words */
9502 /* XXX: store correct base if write back */
9ee6e8bb 9503 if (insn & (1 << 22)) {
da3e53dd 9504 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9505 if (IS_USER(s))
9506 goto illegal_op; /* only usable in supervisor mode */
9507
da3e53dd
PM
9508 if (is_load && extract32(insn, 15, 1)) {
9509 exc_return = true;
9510 } else {
9511 user = true;
9512 }
9ee6e8bb
PB
9513 }
9514 rn = (insn >> 16) & 0xf;
b0109805 9515 addr = load_reg(s, rn);
9ee6e8bb
PB
9516
9517 /* compute total size */
9518 loaded_base = 0;
f764718d 9519 loaded_var = NULL;
9ee6e8bb
PB
9520 n = 0;
9521 for(i=0;i<16;i++) {
9522 if (insn & (1 << i))
9523 n++;
9524 }
9525 /* XXX: test invalid n == 0 case ? */
9526 if (insn & (1 << 23)) {
9527 if (insn & (1 << 24)) {
9528 /* pre increment */
b0109805 9529 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9530 } else {
9531 /* post increment */
9532 }
9533 } else {
9534 if (insn & (1 << 24)) {
9535 /* pre decrement */
b0109805 9536 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9537 } else {
9538 /* post decrement */
9539 if (n != 1)
b0109805 9540 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9541 }
9542 }
9543 j = 0;
9544 for(i=0;i<16;i++) {
9545 if (insn & (1 << i)) {
da3e53dd 9546 if (is_load) {
9ee6e8bb 9547 /* load */
5a839c0d 9548 tmp = tcg_temp_new_i32();
12dcc321 9549 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9550 if (user) {
b75263d6 9551 tmp2 = tcg_const_i32(i);
1ce94f81 9552 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9553 tcg_temp_free_i32(tmp2);
7d1b0095 9554 tcg_temp_free_i32(tmp);
9ee6e8bb 9555 } else if (i == rn) {
b0109805 9556 loaded_var = tmp;
9ee6e8bb 9557 loaded_base = 1;
fb0e8e79
PM
9558 } else if (rn == 15 && exc_return) {
9559 store_pc_exc_ret(s, tmp);
9ee6e8bb 9560 } else {
7dcc1f89 9561 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9562 }
9563 } else {
9564 /* store */
9565 if (i == 15) {
9566 /* special case: r15 = PC + 8 */
9567 val = (long)s->pc + 4;
7d1b0095 9568 tmp = tcg_temp_new_i32();
b0109805 9569 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9570 } else if (user) {
7d1b0095 9571 tmp = tcg_temp_new_i32();
b75263d6 9572 tmp2 = tcg_const_i32(i);
9ef39277 9573 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9574 tcg_temp_free_i32(tmp2);
9ee6e8bb 9575 } else {
b0109805 9576 tmp = load_reg(s, i);
9ee6e8bb 9577 }
12dcc321 9578 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9579 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9580 }
9581 j++;
9582 /* no need to add after the last transfer */
9583 if (j != n)
b0109805 9584 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9585 }
9586 }
9587 if (insn & (1 << 21)) {
9588 /* write back */
9589 if (insn & (1 << 23)) {
9590 if (insn & (1 << 24)) {
9591 /* pre increment */
9592 } else {
9593 /* post increment */
b0109805 9594 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9595 }
9596 } else {
9597 if (insn & (1 << 24)) {
9598 /* pre decrement */
9599 if (n != 1)
b0109805 9600 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9601 } else {
9602 /* post decrement */
b0109805 9603 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9604 }
9605 }
b0109805
PB
9606 store_reg(s, rn, addr);
9607 } else {
7d1b0095 9608 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9609 }
9610 if (loaded_base) {
b0109805 9611 store_reg(s, rn, loaded_var);
9ee6e8bb 9612 }
da3e53dd 9613 if (exc_return) {
9ee6e8bb 9614 /* Restore CPSR from SPSR. */
d9ba4830 9615 tmp = load_cpu_field(spsr);
235ea1f5 9616 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9617 tcg_temp_free_i32(tmp);
b29fd33d 9618 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9619 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9620 }
9621 }
9622 break;
9623 case 0xa:
9624 case 0xb:
9625 {
9626 int32_t offset;
9627
9628 /* branch (and link) */
9629 val = (int32_t)s->pc;
9630 if (insn & (1 << 24)) {
7d1b0095 9631 tmp = tcg_temp_new_i32();
5e3f878a
PB
9632 tcg_gen_movi_i32(tmp, val);
9633 store_reg(s, 14, tmp);
9ee6e8bb 9634 }
534df156
PM
9635 offset = sextract32(insn << 2, 0, 26);
9636 val += offset + 4;
9ee6e8bb
PB
9637 gen_jmp(s, val);
9638 }
9639 break;
9640 case 0xc:
9641 case 0xd:
9642 case 0xe:
6a57f3eb
WN
9643 if (((insn >> 8) & 0xe) == 10) {
9644 /* VFP. */
7dcc1f89 9645 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9646 goto illegal_op;
9647 }
7dcc1f89 9648 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9649 /* Coprocessor. */
9ee6e8bb 9650 goto illegal_op;
6a57f3eb 9651 }
9ee6e8bb
PB
9652 break;
9653 case 0xf:
9654 /* swi */
eaed129d 9655 gen_set_pc_im(s, s->pc);
d4a2dc67 9656 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9657 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9658 break;
9659 default:
9660 illegal_op:
73710361
GB
9661 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9662 default_exception_el(s));
9ee6e8bb
PB
9663 break;
9664 }
9665 }
9666}
9667
296e5a0a
PM
9668static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9669{
9670 /* Return true if this is a 16 bit instruction. We must be precise
9671 * about this (matching the decode). We assume that s->pc still
9672 * points to the first 16 bits of the insn.
9673 */
9674 if ((insn >> 11) < 0x1d) {
9675 /* Definitely a 16-bit instruction */
9676 return true;
9677 }
9678
9679 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9680 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9681 * end up actually treating this as two 16-bit insns, though,
9682 * if it's half of a bl/blx pair that might span a page boundary.
9683 */
9684 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9685 /* Thumb2 cores (including all M profile ones) always treat
9686 * 32-bit insns as 32-bit.
9687 */
9688 return false;
9689 }
9690
9691 if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
9692 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9693 * is not on the next page; we merge this into a 32-bit
9694 * insn.
9695 */
9696 return false;
9697 }
9698 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9699 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9700 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9701 * -- handle as single 16 bit insn
9702 */
9703 return true;
9704}
9705
9ee6e8bb
PB
9706/* Return true if this is a Thumb-2 logical op. */
9707static int
9708thumb2_logic_op(int op)
9709{
9710 return (op < 8);
9711}
9712
9713/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9714 then set condition code flags based on the result of the operation.
9715 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9716 to the high bit of T1.
9717 Returns zero if the opcode is valid. */
9718
9719static int
39d5492a
PM
9720gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9721 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9722{
9723 int logic_cc;
9724
9725 logic_cc = 0;
9726 switch (op) {
9727 case 0: /* and */
396e467c 9728 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9729 logic_cc = conds;
9730 break;
9731 case 1: /* bic */
f669df27 9732 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9733 logic_cc = conds;
9734 break;
9735 case 2: /* orr */
396e467c 9736 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9737 logic_cc = conds;
9738 break;
9739 case 3: /* orn */
29501f1b 9740 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9741 logic_cc = conds;
9742 break;
9743 case 4: /* eor */
396e467c 9744 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9745 logic_cc = conds;
9746 break;
9747 case 8: /* add */
9748 if (conds)
72485ec4 9749 gen_add_CC(t0, t0, t1);
9ee6e8bb 9750 else
396e467c 9751 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9752 break;
9753 case 10: /* adc */
9754 if (conds)
49b4c31e 9755 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9756 else
396e467c 9757 gen_adc(t0, t1);
9ee6e8bb
PB
9758 break;
9759 case 11: /* sbc */
2de68a49
RH
9760 if (conds) {
9761 gen_sbc_CC(t0, t0, t1);
9762 } else {
396e467c 9763 gen_sub_carry(t0, t0, t1);
2de68a49 9764 }
9ee6e8bb
PB
9765 break;
9766 case 13: /* sub */
9767 if (conds)
72485ec4 9768 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9769 else
396e467c 9770 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9771 break;
9772 case 14: /* rsb */
9773 if (conds)
72485ec4 9774 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9775 else
396e467c 9776 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9777 break;
9778 default: /* 5, 6, 7, 9, 12, 15. */
9779 return 1;
9780 }
9781 if (logic_cc) {
396e467c 9782 gen_logic_CC(t0);
9ee6e8bb 9783 if (shifter_out)
396e467c 9784 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9785 }
9786 return 0;
9787}
9788
2eea841c
PM
9789/* Translate a 32-bit thumb instruction. */
9790static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9791{
296e5a0a 9792 uint32_t imm, shift, offset;
9ee6e8bb 9793 uint32_t rd, rn, rm, rs;
39d5492a
PM
9794 TCGv_i32 tmp;
9795 TCGv_i32 tmp2;
9796 TCGv_i32 tmp3;
9797 TCGv_i32 addr;
a7812ae4 9798 TCGv_i64 tmp64;
9ee6e8bb
PB
9799 int op;
9800 int shiftop;
9801 int conds;
9802 int logic_cc;
9803
296e5a0a
PM
9804 /* The only 32 bit insn that's allowed for Thumb1 is the combined
9805 * BL/BLX prefix and suffix.
9806 */
9ee6e8bb
PB
9807 if ((insn & 0xf800e800) != 0xf000e800) {
9808 ARCH(6T2);
9809 }
9810
9811 rn = (insn >> 16) & 0xf;
9812 rs = (insn >> 12) & 0xf;
9813 rd = (insn >> 8) & 0xf;
9814 rm = insn & 0xf;
9815 switch ((insn >> 25) & 0xf) {
9816 case 0: case 1: case 2: case 3:
9817 /* 16-bit instructions. Should never happen. */
9818 abort();
9819 case 4:
9820 if (insn & (1 << 22)) {
ebfe27c5
PM
9821 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9822 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 9823 * table branch, TT.
ebfe27c5 9824 */
76eff04d
PM
9825 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9826 arm_dc_feature(s, ARM_FEATURE_V8)) {
9827 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9828 * - SG (v8M only)
9829 * The bulk of the behaviour for this instruction is implemented
9830 * in v7m_handle_execute_nsc(), which deals with the insn when
9831 * it is executed by a CPU in non-secure state from memory
9832 * which is Secure & NonSecure-Callable.
9833 * Here we only need to handle the remaining cases:
9834 * * in NS memory (including the "security extension not
9835 * implemented" case) : NOP
9836 * * in S memory but CPU already secure (clear IT bits)
9837 * We know that the attribute for the memory this insn is
9838 * in must match the current CPU state, because otherwise
9839 * get_phys_addr_pmsav8 would have generated an exception.
9840 */
9841 if (s->v8m_secure) {
9842 /* Like the IT insn, we don't need to generate any code */
9843 s->condexec_cond = 0;
9844 s->condexec_mask = 0;
9845 }
9846 } else if (insn & 0x01200000) {
ebfe27c5
PM
9847 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9848 * - load/store dual (post-indexed)
9849 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9850 * - load/store dual (literal and immediate)
9851 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9852 * - load/store dual (pre-indexed)
9853 */
9ee6e8bb 9854 if (rn == 15) {
ebfe27c5
PM
9855 if (insn & (1 << 21)) {
9856 /* UNPREDICTABLE */
9857 goto illegal_op;
9858 }
7d1b0095 9859 addr = tcg_temp_new_i32();
b0109805 9860 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9861 } else {
b0109805 9862 addr = load_reg(s, rn);
9ee6e8bb
PB
9863 }
9864 offset = (insn & 0xff) * 4;
9865 if ((insn & (1 << 23)) == 0)
9866 offset = -offset;
9867 if (insn & (1 << 24)) {
b0109805 9868 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9869 offset = 0;
9870 }
9871 if (insn & (1 << 20)) {
9872 /* ldrd */
e2592fad 9873 tmp = tcg_temp_new_i32();
12dcc321 9874 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9875 store_reg(s, rs, tmp);
9876 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9877 tmp = tcg_temp_new_i32();
12dcc321 9878 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9879 store_reg(s, rd, tmp);
9ee6e8bb
PB
9880 } else {
9881 /* strd */
b0109805 9882 tmp = load_reg(s, rs);
12dcc321 9883 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9884 tcg_temp_free_i32(tmp);
b0109805
PB
9885 tcg_gen_addi_i32(addr, addr, 4);
9886 tmp = load_reg(s, rd);
12dcc321 9887 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9888 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9889 }
9890 if (insn & (1 << 21)) {
9891 /* Base writeback. */
b0109805
PB
9892 tcg_gen_addi_i32(addr, addr, offset - 4);
9893 store_reg(s, rn, addr);
9894 } else {
7d1b0095 9895 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9896 }
9897 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9898 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9899 * - load/store exclusive word
5158de24 9900 * - TT (v8M only)
ebfe27c5
PM
9901 */
9902 if (rs == 15) {
5158de24
PM
9903 if (!(insn & (1 << 20)) &&
9904 arm_dc_feature(s, ARM_FEATURE_M) &&
9905 arm_dc_feature(s, ARM_FEATURE_V8)) {
9906 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9907 * - TT (v8M only)
9908 */
9909 bool alt = insn & (1 << 7);
9910 TCGv_i32 addr, op, ttresp;
9911
9912 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9913 /* we UNDEF for these UNPREDICTABLE cases */
9914 goto illegal_op;
9915 }
9916
9917 if (alt && !s->v8m_secure) {
9918 goto illegal_op;
9919 }
9920
9921 addr = load_reg(s, rn);
9922 op = tcg_const_i32(extract32(insn, 6, 2));
9923 ttresp = tcg_temp_new_i32();
9924 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9925 tcg_temp_free_i32(addr);
9926 tcg_temp_free_i32(op);
9927 store_reg(s, rd, ttresp);
384c6c03 9928 break;
5158de24 9929 }
ebfe27c5
PM
9930 goto illegal_op;
9931 }
39d5492a 9932 addr = tcg_temp_local_new_i32();
98a46317 9933 load_reg_var(s, addr, rn);
426f5abc 9934 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9935 if (insn & (1 << 20)) {
426f5abc 9936 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9937 } else {
426f5abc 9938 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9939 }
39d5492a 9940 tcg_temp_free_i32(addr);
2359bf80 9941 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9942 /* Table Branch. */
9943 if (rn == 15) {
7d1b0095 9944 addr = tcg_temp_new_i32();
b0109805 9945 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9946 } else {
b0109805 9947 addr = load_reg(s, rn);
9ee6e8bb 9948 }
b26eefb6 9949 tmp = load_reg(s, rm);
b0109805 9950 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9951 if (insn & (1 << 4)) {
9952 /* tbh */
b0109805 9953 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9954 tcg_temp_free_i32(tmp);
e2592fad 9955 tmp = tcg_temp_new_i32();
12dcc321 9956 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9957 } else { /* tbb */
7d1b0095 9958 tcg_temp_free_i32(tmp);
e2592fad 9959 tmp = tcg_temp_new_i32();
12dcc321 9960 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9961 }
7d1b0095 9962 tcg_temp_free_i32(addr);
b0109805
PB
9963 tcg_gen_shli_i32(tmp, tmp, 1);
9964 tcg_gen_addi_i32(tmp, tmp, s->pc);
9965 store_reg(s, 15, tmp);
9ee6e8bb 9966 } else {
2359bf80 9967 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9968 op = (insn >> 4) & 0x3;
2359bf80
MR
9969 switch (op2) {
9970 case 0:
426f5abc 9971 goto illegal_op;
2359bf80
MR
9972 case 1:
9973 /* Load/store exclusive byte/halfword/doubleword */
9974 if (op == 2) {
9975 goto illegal_op;
9976 }
9977 ARCH(7);
9978 break;
9979 case 2:
9980 /* Load-acquire/store-release */
9981 if (op == 3) {
9982 goto illegal_op;
9983 }
9984 /* Fall through */
9985 case 3:
9986 /* Load-acquire/store-release exclusive */
9987 ARCH(8);
9988 break;
426f5abc 9989 }
39d5492a 9990 addr = tcg_temp_local_new_i32();
98a46317 9991 load_reg_var(s, addr, rn);
2359bf80
MR
9992 if (!(op2 & 1)) {
9993 if (insn & (1 << 20)) {
9994 tmp = tcg_temp_new_i32();
9995 switch (op) {
9996 case 0: /* ldab */
9bb6558a
PM
9997 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9998 rs | ISSIsAcqRel);
2359bf80
MR
9999 break;
10000 case 1: /* ldah */
9bb6558a
PM
10001 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10002 rs | ISSIsAcqRel);
2359bf80
MR
10003 break;
10004 case 2: /* lda */
9bb6558a
PM
10005 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10006 rs | ISSIsAcqRel);
2359bf80
MR
10007 break;
10008 default:
10009 abort();
10010 }
10011 store_reg(s, rs, tmp);
10012 } else {
10013 tmp = load_reg(s, rs);
10014 switch (op) {
10015 case 0: /* stlb */
9bb6558a
PM
10016 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10017 rs | ISSIsAcqRel);
2359bf80
MR
10018 break;
10019 case 1: /* stlh */
9bb6558a
PM
10020 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10021 rs | ISSIsAcqRel);
2359bf80
MR
10022 break;
10023 case 2: /* stl */
9bb6558a
PM
10024 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10025 rs | ISSIsAcqRel);
2359bf80
MR
10026 break;
10027 default:
10028 abort();
10029 }
10030 tcg_temp_free_i32(tmp);
10031 }
10032 } else if (insn & (1 << 20)) {
426f5abc 10033 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10034 } else {
426f5abc 10035 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10036 }
39d5492a 10037 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10038 }
10039 } else {
10040 /* Load/store multiple, RFE, SRS. */
10041 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10042 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10043 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10044 goto illegal_op;
00115976 10045 }
9ee6e8bb
PB
10046 if (insn & (1 << 20)) {
10047 /* rfe */
b0109805
PB
10048 addr = load_reg(s, rn);
10049 if ((insn & (1 << 24)) == 0)
10050 tcg_gen_addi_i32(addr, addr, -8);
10051 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10052 tmp = tcg_temp_new_i32();
12dcc321 10053 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10054 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10055 tmp2 = tcg_temp_new_i32();
12dcc321 10056 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10057 if (insn & (1 << 21)) {
10058 /* Base writeback. */
b0109805
PB
10059 if (insn & (1 << 24)) {
10060 tcg_gen_addi_i32(addr, addr, 4);
10061 } else {
10062 tcg_gen_addi_i32(addr, addr, -4);
10063 }
10064 store_reg(s, rn, addr);
10065 } else {
7d1b0095 10066 tcg_temp_free_i32(addr);
9ee6e8bb 10067 }
b0109805 10068 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10069 } else {
10070 /* srs */
81465888
PM
10071 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10072 insn & (1 << 21));
9ee6e8bb
PB
10073 }
10074 } else {
5856d44e 10075 int i, loaded_base = 0;
39d5492a 10076 TCGv_i32 loaded_var;
9ee6e8bb 10077 /* Load/store multiple. */
b0109805 10078 addr = load_reg(s, rn);
9ee6e8bb
PB
10079 offset = 0;
10080 for (i = 0; i < 16; i++) {
10081 if (insn & (1 << i))
10082 offset += 4;
10083 }
10084 if (insn & (1 << 24)) {
b0109805 10085 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10086 }
10087
f764718d 10088 loaded_var = NULL;
9ee6e8bb
PB
10089 for (i = 0; i < 16; i++) {
10090 if ((insn & (1 << i)) == 0)
10091 continue;
10092 if (insn & (1 << 20)) {
10093 /* Load. */
e2592fad 10094 tmp = tcg_temp_new_i32();
12dcc321 10095 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10096 if (i == 15) {
3bb8a96f 10097 gen_bx_excret(s, tmp);
5856d44e
YO
10098 } else if (i == rn) {
10099 loaded_var = tmp;
10100 loaded_base = 1;
9ee6e8bb 10101 } else {
b0109805 10102 store_reg(s, i, tmp);
9ee6e8bb
PB
10103 }
10104 } else {
10105 /* Store. */
b0109805 10106 tmp = load_reg(s, i);
12dcc321 10107 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10108 tcg_temp_free_i32(tmp);
9ee6e8bb 10109 }
b0109805 10110 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10111 }
5856d44e
YO
10112 if (loaded_base) {
10113 store_reg(s, rn, loaded_var);
10114 }
9ee6e8bb
PB
10115 if (insn & (1 << 21)) {
10116 /* Base register writeback. */
10117 if (insn & (1 << 24)) {
b0109805 10118 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10119 }
10120 /* Fault if writeback register is in register list. */
10121 if (insn & (1 << rn))
10122 goto illegal_op;
b0109805
PB
10123 store_reg(s, rn, addr);
10124 } else {
7d1b0095 10125 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10126 }
10127 }
10128 }
10129 break;
2af9ab77
JB
10130 case 5:
10131
9ee6e8bb 10132 op = (insn >> 21) & 0xf;
2af9ab77 10133 if (op == 6) {
62b44f05
AR
10134 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10135 goto illegal_op;
10136 }
2af9ab77
JB
10137 /* Halfword pack. */
10138 tmp = load_reg(s, rn);
10139 tmp2 = load_reg(s, rm);
10140 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10141 if (insn & (1 << 5)) {
10142 /* pkhtb */
10143 if (shift == 0)
10144 shift = 31;
10145 tcg_gen_sari_i32(tmp2, tmp2, shift);
10146 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10147 tcg_gen_ext16u_i32(tmp2, tmp2);
10148 } else {
10149 /* pkhbt */
10150 if (shift)
10151 tcg_gen_shli_i32(tmp2, tmp2, shift);
10152 tcg_gen_ext16u_i32(tmp, tmp);
10153 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10154 }
10155 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10156 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10157 store_reg(s, rd, tmp);
10158 } else {
2af9ab77
JB
10159 /* Data processing register constant shift. */
10160 if (rn == 15) {
7d1b0095 10161 tmp = tcg_temp_new_i32();
2af9ab77
JB
10162 tcg_gen_movi_i32(tmp, 0);
10163 } else {
10164 tmp = load_reg(s, rn);
10165 }
10166 tmp2 = load_reg(s, rm);
10167
10168 shiftop = (insn >> 4) & 3;
10169 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10170 conds = (insn & (1 << 20)) != 0;
10171 logic_cc = (conds && thumb2_logic_op(op));
10172 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10173 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10174 goto illegal_op;
7d1b0095 10175 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10176 if (rd != 15) {
10177 store_reg(s, rd, tmp);
10178 } else {
7d1b0095 10179 tcg_temp_free_i32(tmp);
2af9ab77 10180 }
3174f8e9 10181 }
9ee6e8bb
PB
10182 break;
10183 case 13: /* Misc data processing. */
10184 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10185 if (op < 4 && (insn & 0xf000) != 0xf000)
10186 goto illegal_op;
10187 switch (op) {
10188 case 0: /* Register controlled shift. */
8984bd2e
PB
10189 tmp = load_reg(s, rn);
10190 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10191 if ((insn & 0x70) != 0)
10192 goto illegal_op;
10193 op = (insn >> 21) & 3;
8984bd2e
PB
10194 logic_cc = (insn & (1 << 20)) != 0;
10195 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10196 if (logic_cc)
10197 gen_logic_CC(tmp);
bedb8a6b 10198 store_reg(s, rd, tmp);
9ee6e8bb
PB
10199 break;
10200 case 1: /* Sign/zero extend. */
62b44f05
AR
10201 op = (insn >> 20) & 7;
10202 switch (op) {
10203 case 0: /* SXTAH, SXTH */
10204 case 1: /* UXTAH, UXTH */
10205 case 4: /* SXTAB, SXTB */
10206 case 5: /* UXTAB, UXTB */
10207 break;
10208 case 2: /* SXTAB16, SXTB16 */
10209 case 3: /* UXTAB16, UXTB16 */
10210 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10211 goto illegal_op;
10212 }
10213 break;
10214 default:
10215 goto illegal_op;
10216 }
10217 if (rn != 15) {
10218 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10219 goto illegal_op;
10220 }
10221 }
5e3f878a 10222 tmp = load_reg(s, rm);
9ee6e8bb 10223 shift = (insn >> 4) & 3;
1301f322 10224 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10225 rotate, a shift is sufficient. */
10226 if (shift != 0)
f669df27 10227 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10228 op = (insn >> 20) & 7;
10229 switch (op) {
5e3f878a
PB
10230 case 0: gen_sxth(tmp); break;
10231 case 1: gen_uxth(tmp); break;
10232 case 2: gen_sxtb16(tmp); break;
10233 case 3: gen_uxtb16(tmp); break;
10234 case 4: gen_sxtb(tmp); break;
10235 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10236 default:
10237 g_assert_not_reached();
9ee6e8bb
PB
10238 }
10239 if (rn != 15) {
5e3f878a 10240 tmp2 = load_reg(s, rn);
9ee6e8bb 10241 if ((op >> 1) == 1) {
5e3f878a 10242 gen_add16(tmp, tmp2);
9ee6e8bb 10243 } else {
5e3f878a 10244 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10245 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10246 }
10247 }
5e3f878a 10248 store_reg(s, rd, tmp);
9ee6e8bb
PB
10249 break;
10250 case 2: /* SIMD add/subtract. */
62b44f05
AR
10251 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10252 goto illegal_op;
10253 }
9ee6e8bb
PB
10254 op = (insn >> 20) & 7;
10255 shift = (insn >> 4) & 7;
10256 if ((op & 3) == 3 || (shift & 3) == 3)
10257 goto illegal_op;
6ddbc6e4
PB
10258 tmp = load_reg(s, rn);
10259 tmp2 = load_reg(s, rm);
10260 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10261 tcg_temp_free_i32(tmp2);
6ddbc6e4 10262 store_reg(s, rd, tmp);
9ee6e8bb
PB
10263 break;
10264 case 3: /* Other data processing. */
10265 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10266 if (op < 4) {
10267 /* Saturating add/subtract. */
62b44f05
AR
10268 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10269 goto illegal_op;
10270 }
d9ba4830
PB
10271 tmp = load_reg(s, rn);
10272 tmp2 = load_reg(s, rm);
9ee6e8bb 10273 if (op & 1)
9ef39277 10274 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10275 if (op & 2)
9ef39277 10276 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10277 else
9ef39277 10278 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10279 tcg_temp_free_i32(tmp2);
9ee6e8bb 10280 } else {
62b44f05
AR
10281 switch (op) {
10282 case 0x0a: /* rbit */
10283 case 0x08: /* rev */
10284 case 0x09: /* rev16 */
10285 case 0x0b: /* revsh */
10286 case 0x18: /* clz */
10287 break;
10288 case 0x10: /* sel */
10289 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10290 goto illegal_op;
10291 }
10292 break;
10293 case 0x20: /* crc32/crc32c */
10294 case 0x21:
10295 case 0x22:
10296 case 0x28:
10297 case 0x29:
10298 case 0x2a:
10299 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10300 goto illegal_op;
10301 }
10302 break;
10303 default:
10304 goto illegal_op;
10305 }
d9ba4830 10306 tmp = load_reg(s, rn);
9ee6e8bb
PB
10307 switch (op) {
10308 case 0x0a: /* rbit */
d9ba4830 10309 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10310 break;
10311 case 0x08: /* rev */
66896cb8 10312 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10313 break;
10314 case 0x09: /* rev16 */
d9ba4830 10315 gen_rev16(tmp);
9ee6e8bb
PB
10316 break;
10317 case 0x0b: /* revsh */
d9ba4830 10318 gen_revsh(tmp);
9ee6e8bb
PB
10319 break;
10320 case 0x10: /* sel */
d9ba4830 10321 tmp2 = load_reg(s, rm);
7d1b0095 10322 tmp3 = tcg_temp_new_i32();
0ecb72a5 10323 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10324 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10325 tcg_temp_free_i32(tmp3);
10326 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10327 break;
10328 case 0x18: /* clz */
7539a012 10329 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10330 break;
eb0ecd5a
WN
10331 case 0x20:
10332 case 0x21:
10333 case 0x22:
10334 case 0x28:
10335 case 0x29:
10336 case 0x2a:
10337 {
10338 /* crc32/crc32c */
10339 uint32_t sz = op & 0x3;
10340 uint32_t c = op & 0x8;
10341
eb0ecd5a 10342 tmp2 = load_reg(s, rm);
aa633469
PM
10343 if (sz == 0) {
10344 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10345 } else if (sz == 1) {
10346 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10347 }
eb0ecd5a
WN
10348 tmp3 = tcg_const_i32(1 << sz);
10349 if (c) {
10350 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10351 } else {
10352 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10353 }
10354 tcg_temp_free_i32(tmp2);
10355 tcg_temp_free_i32(tmp3);
10356 break;
10357 }
9ee6e8bb 10358 default:
62b44f05 10359 g_assert_not_reached();
9ee6e8bb
PB
10360 }
10361 }
d9ba4830 10362 store_reg(s, rd, tmp);
9ee6e8bb
PB
10363 break;
10364 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10365 switch ((insn >> 20) & 7) {
10366 case 0: /* 32 x 32 -> 32 */
10367 case 7: /* Unsigned sum of absolute differences. */
10368 break;
10369 case 1: /* 16 x 16 -> 32 */
10370 case 2: /* Dual multiply add. */
10371 case 3: /* 32 * 16 -> 32msb */
10372 case 4: /* Dual multiply subtract. */
10373 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10374 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10375 goto illegal_op;
10376 }
10377 break;
10378 }
9ee6e8bb 10379 op = (insn >> 4) & 0xf;
d9ba4830
PB
10380 tmp = load_reg(s, rn);
10381 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10382 switch ((insn >> 20) & 7) {
10383 case 0: /* 32 x 32 -> 32 */
d9ba4830 10384 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10385 tcg_temp_free_i32(tmp2);
9ee6e8bb 10386 if (rs != 15) {
d9ba4830 10387 tmp2 = load_reg(s, rs);
9ee6e8bb 10388 if (op)
d9ba4830 10389 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10390 else
d9ba4830 10391 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10392 tcg_temp_free_i32(tmp2);
9ee6e8bb 10393 }
9ee6e8bb
PB
10394 break;
10395 case 1: /* 16 x 16 -> 32 */
d9ba4830 10396 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10397 tcg_temp_free_i32(tmp2);
9ee6e8bb 10398 if (rs != 15) {
d9ba4830 10399 tmp2 = load_reg(s, rs);
9ef39277 10400 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10401 tcg_temp_free_i32(tmp2);
9ee6e8bb 10402 }
9ee6e8bb
PB
10403 break;
10404 case 2: /* Dual multiply add. */
10405 case 4: /* Dual multiply subtract. */
10406 if (op)
d9ba4830
PB
10407 gen_swap_half(tmp2);
10408 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10409 if (insn & (1 << 22)) {
e1d177b9 10410 /* This subtraction cannot overflow. */
d9ba4830 10411 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10412 } else {
e1d177b9
PM
10413 /* This addition cannot overflow 32 bits;
10414 * however it may overflow considered as a signed
10415 * operation, in which case we must set the Q flag.
10416 */
9ef39277 10417 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10418 }
7d1b0095 10419 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10420 if (rs != 15)
10421 {
d9ba4830 10422 tmp2 = load_reg(s, rs);
9ef39277 10423 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10424 tcg_temp_free_i32(tmp2);
9ee6e8bb 10425 }
9ee6e8bb
PB
10426 break;
10427 case 3: /* 32 * 16 -> 32msb */
10428 if (op)
d9ba4830 10429 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10430 else
d9ba4830 10431 gen_sxth(tmp2);
a7812ae4
PB
10432 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10433 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10434 tmp = tcg_temp_new_i32();
ecc7b3aa 10435 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10436 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10437 if (rs != 15)
10438 {
d9ba4830 10439 tmp2 = load_reg(s, rs);
9ef39277 10440 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10441 tcg_temp_free_i32(tmp2);
9ee6e8bb 10442 }
9ee6e8bb 10443 break;
838fa72d
AJ
10444 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10445 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10446 if (rs != 15) {
838fa72d
AJ
10447 tmp = load_reg(s, rs);
10448 if (insn & (1 << 20)) {
10449 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10450 } else {
838fa72d 10451 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10452 }
2c0262af 10453 }
838fa72d
AJ
10454 if (insn & (1 << 4)) {
10455 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10456 }
10457 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10458 tmp = tcg_temp_new_i32();
ecc7b3aa 10459 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10460 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10461 break;
10462 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10463 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10464 tcg_temp_free_i32(tmp2);
9ee6e8bb 10465 if (rs != 15) {
d9ba4830
PB
10466 tmp2 = load_reg(s, rs);
10467 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10468 tcg_temp_free_i32(tmp2);
5fd46862 10469 }
9ee6e8bb 10470 break;
2c0262af 10471 }
d9ba4830 10472 store_reg(s, rd, tmp);
2c0262af 10473 break;
9ee6e8bb
PB
10474 case 6: case 7: /* 64-bit multiply, Divide. */
10475 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10476 tmp = load_reg(s, rn);
10477 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10478 if ((op & 0x50) == 0x10) {
10479 /* sdiv, udiv */
d614a513 10480 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10481 goto illegal_op;
47789990 10482 }
9ee6e8bb 10483 if (op & 0x20)
5e3f878a 10484 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10485 else
5e3f878a 10486 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10487 tcg_temp_free_i32(tmp2);
5e3f878a 10488 store_reg(s, rd, tmp);
9ee6e8bb
PB
10489 } else if ((op & 0xe) == 0xc) {
10490 /* Dual multiply accumulate long. */
62b44f05
AR
10491 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10492 tcg_temp_free_i32(tmp);
10493 tcg_temp_free_i32(tmp2);
10494 goto illegal_op;
10495 }
9ee6e8bb 10496 if (op & 1)
5e3f878a
PB
10497 gen_swap_half(tmp2);
10498 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10499 if (op & 0x10) {
5e3f878a 10500 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10501 } else {
5e3f878a 10502 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10503 }
7d1b0095 10504 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10505 /* BUGFIX */
10506 tmp64 = tcg_temp_new_i64();
10507 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10508 tcg_temp_free_i32(tmp);
a7812ae4
PB
10509 gen_addq(s, tmp64, rs, rd);
10510 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10511 tcg_temp_free_i64(tmp64);
2c0262af 10512 } else {
9ee6e8bb
PB
10513 if (op & 0x20) {
10514 /* Unsigned 64-bit multiply */
a7812ae4 10515 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10516 } else {
9ee6e8bb
PB
10517 if (op & 8) {
10518 /* smlalxy */
62b44f05
AR
10519 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10520 tcg_temp_free_i32(tmp2);
10521 tcg_temp_free_i32(tmp);
10522 goto illegal_op;
10523 }
5e3f878a 10524 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10525 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10526 tmp64 = tcg_temp_new_i64();
10527 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10528 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10529 } else {
10530 /* Signed 64-bit multiply */
a7812ae4 10531 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10532 }
b5ff1b31 10533 }
9ee6e8bb
PB
10534 if (op & 4) {
10535 /* umaal */
62b44f05
AR
10536 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10537 tcg_temp_free_i64(tmp64);
10538 goto illegal_op;
10539 }
a7812ae4
PB
10540 gen_addq_lo(s, tmp64, rs);
10541 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10542 } else if (op & 0x40) {
10543 /* 64-bit accumulate. */
a7812ae4 10544 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10545 }
a7812ae4 10546 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10547 tcg_temp_free_i64(tmp64);
5fd46862 10548 }
2c0262af 10549 break;
9ee6e8bb
PB
10550 }
10551 break;
10552 case 6: case 7: case 14: case 15:
10553 /* Coprocessor. */
7517748e
PM
10554 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10555 /* We don't currently implement M profile FP support,
10556 * so this entire space should give a NOCP fault.
10557 */
10558 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10559 default_exception_el(s));
10560 break;
10561 }
9ee6e8bb
PB
10562 if (((insn >> 24) & 3) == 3) {
10563 /* Translate into the equivalent ARM encoding. */
f06053e3 10564 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10565 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10566 goto illegal_op;
7dcc1f89 10567 }
6a57f3eb 10568 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10569 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10570 goto illegal_op;
10571 }
9ee6e8bb
PB
10572 } else {
10573 if (insn & (1 << 28))
10574 goto illegal_op;
7dcc1f89 10575 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10576 goto illegal_op;
7dcc1f89 10577 }
9ee6e8bb
PB
10578 }
10579 break;
10580 case 8: case 9: case 10: case 11:
10581 if (insn & (1 << 15)) {
10582 /* Branches, misc control. */
10583 if (insn & 0x5000) {
10584 /* Unconditional branch. */
10585 /* signextend(hw1[10:0]) -> offset[:12]. */
10586 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10587 /* hw1[10:0] -> offset[11:1]. */
10588 offset |= (insn & 0x7ff) << 1;
10589 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10590 offset[24:22] already have the same value because of the
10591 sign extension above. */
10592 offset ^= ((~insn) & (1 << 13)) << 10;
10593 offset ^= ((~insn) & (1 << 11)) << 11;
10594
9ee6e8bb
PB
10595 if (insn & (1 << 14)) {
10596 /* Branch and link. */
3174f8e9 10597 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10598 }
3b46e624 10599
b0109805 10600 offset += s->pc;
9ee6e8bb
PB
10601 if (insn & (1 << 12)) {
10602 /* b/bl */
b0109805 10603 gen_jmp(s, offset);
9ee6e8bb
PB
10604 } else {
10605 /* blx */
b0109805 10606 offset &= ~(uint32_t)2;
be5e7a76 10607 /* thumb2 bx, no need to check */
b0109805 10608 gen_bx_im(s, offset);
2c0262af 10609 }
9ee6e8bb
PB
10610 } else if (((insn >> 23) & 7) == 7) {
10611 /* Misc control */
10612 if (insn & (1 << 13))
10613 goto illegal_op;
10614
10615 if (insn & (1 << 26)) {
001b3cab
PM
10616 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10617 goto illegal_op;
10618 }
37e6456e
PM
10619 if (!(insn & (1 << 20))) {
10620 /* Hypervisor call (v7) */
10621 int imm16 = extract32(insn, 16, 4) << 12
10622 | extract32(insn, 0, 12);
10623 ARCH(7);
10624 if (IS_USER(s)) {
10625 goto illegal_op;
10626 }
10627 gen_hvc(s, imm16);
10628 } else {
10629 /* Secure monitor call (v6+) */
10630 ARCH(6K);
10631 if (IS_USER(s)) {
10632 goto illegal_op;
10633 }
10634 gen_smc(s);
10635 }
2c0262af 10636 } else {
9ee6e8bb
PB
10637 op = (insn >> 20) & 7;
10638 switch (op) {
10639 case 0: /* msr cpsr. */
b53d8923 10640 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10641 tmp = load_reg(s, rn);
b28b3377
PM
10642 /* the constant is the mask and SYSm fields */
10643 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10644 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10645 tcg_temp_free_i32(addr);
7d1b0095 10646 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10647 gen_lookup_tb(s);
10648 break;
10649 }
10650 /* fall through */
10651 case 1: /* msr spsr. */
b53d8923 10652 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10653 goto illegal_op;
b53d8923 10654 }
8bfd0550
PM
10655
10656 if (extract32(insn, 5, 1)) {
10657 /* MSR (banked) */
10658 int sysm = extract32(insn, 8, 4) |
10659 (extract32(insn, 4, 1) << 4);
10660 int r = op & 1;
10661
10662 gen_msr_banked(s, r, sysm, rm);
10663 break;
10664 }
10665
10666 /* MSR (for PSRs) */
2fbac54b
FN
10667 tmp = load_reg(s, rn);
10668 if (gen_set_psr(s,
7dcc1f89 10669 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10670 op == 1, tmp))
9ee6e8bb
PB
10671 goto illegal_op;
10672 break;
10673 case 2: /* cps, nop-hint. */
10674 if (((insn >> 8) & 7) == 0) {
10675 gen_nop_hint(s, insn & 0xff);
10676 }
10677 /* Implemented as NOP in user mode. */
10678 if (IS_USER(s))
10679 break;
10680 offset = 0;
10681 imm = 0;
10682 if (insn & (1 << 10)) {
10683 if (insn & (1 << 7))
10684 offset |= CPSR_A;
10685 if (insn & (1 << 6))
10686 offset |= CPSR_I;
10687 if (insn & (1 << 5))
10688 offset |= CPSR_F;
10689 if (insn & (1 << 9))
10690 imm = CPSR_A | CPSR_I | CPSR_F;
10691 }
10692 if (insn & (1 << 8)) {
10693 offset |= 0x1f;
10694 imm |= (insn & 0x1f);
10695 }
10696 if (offset) {
2fbac54b 10697 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10698 }
10699 break;
10700 case 3: /* Special control operations. */
426f5abc 10701 ARCH(7);
9ee6e8bb
PB
10702 op = (insn >> 4) & 0xf;
10703 switch (op) {
10704 case 2: /* clrex */
426f5abc 10705 gen_clrex(s);
9ee6e8bb
PB
10706 break;
10707 case 4: /* dsb */
10708 case 5: /* dmb */
61e4c432 10709 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10710 break;
6df99dec
SS
10711 case 6: /* isb */
10712 /* We need to break the TB after this insn
10713 * to execute self-modifying code correctly
10714 * and also to take any pending interrupts
10715 * immediately.
10716 */
0b609cc1 10717 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10718 break;
9ee6e8bb
PB
10719 default:
10720 goto illegal_op;
10721 }
10722 break;
10723 case 4: /* bxj */
9d7c59c8
PM
10724 /* Trivial implementation equivalent to bx.
10725 * This instruction doesn't exist at all for M-profile.
10726 */
10727 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10728 goto illegal_op;
10729 }
d9ba4830
PB
10730 tmp = load_reg(s, rn);
10731 gen_bx(s, tmp);
9ee6e8bb
PB
10732 break;
10733 case 5: /* Exception return. */
b8b45b68
RV
10734 if (IS_USER(s)) {
10735 goto illegal_op;
10736 }
10737 if (rn != 14 || rd != 15) {
10738 goto illegal_op;
10739 }
10740 tmp = load_reg(s, rn);
10741 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10742 gen_exception_return(s, tmp);
10743 break;
8bfd0550 10744 case 6: /* MRS */
43ac6574
PM
10745 if (extract32(insn, 5, 1) &&
10746 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10747 /* MRS (banked) */
10748 int sysm = extract32(insn, 16, 4) |
10749 (extract32(insn, 4, 1) << 4);
10750
10751 gen_mrs_banked(s, 0, sysm, rd);
10752 break;
10753 }
10754
3d54026f
PM
10755 if (extract32(insn, 16, 4) != 0xf) {
10756 goto illegal_op;
10757 }
10758 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10759 extract32(insn, 0, 8) != 0) {
10760 goto illegal_op;
10761 }
10762
8bfd0550 10763 /* mrs cpsr */
7d1b0095 10764 tmp = tcg_temp_new_i32();
b53d8923 10765 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10766 addr = tcg_const_i32(insn & 0xff);
10767 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10768 tcg_temp_free_i32(addr);
9ee6e8bb 10769 } else {
9ef39277 10770 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10771 }
8984bd2e 10772 store_reg(s, rd, tmp);
9ee6e8bb 10773 break;
8bfd0550 10774 case 7: /* MRS */
43ac6574
PM
10775 if (extract32(insn, 5, 1) &&
10776 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10777 /* MRS (banked) */
10778 int sysm = extract32(insn, 16, 4) |
10779 (extract32(insn, 4, 1) << 4);
10780
10781 gen_mrs_banked(s, 1, sysm, rd);
10782 break;
10783 }
10784
10785 /* mrs spsr. */
9ee6e8bb 10786 /* Not accessible in user mode. */
b53d8923 10787 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10788 goto illegal_op;
b53d8923 10789 }
3d54026f
PM
10790
10791 if (extract32(insn, 16, 4) != 0xf ||
10792 extract32(insn, 0, 8) != 0) {
10793 goto illegal_op;
10794 }
10795
d9ba4830
PB
10796 tmp = load_cpu_field(spsr);
10797 store_reg(s, rd, tmp);
9ee6e8bb 10798 break;
2c0262af
FB
10799 }
10800 }
9ee6e8bb
PB
10801 } else {
10802 /* Conditional branch. */
10803 op = (insn >> 22) & 0xf;
10804 /* Generate a conditional jump to next instruction. */
10805 s->condlabel = gen_new_label();
39fb730a 10806 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10807 s->condjmp = 1;
10808
10809 /* offset[11:1] = insn[10:0] */
10810 offset = (insn & 0x7ff) << 1;
10811 /* offset[17:12] = insn[21:16]. */
10812 offset |= (insn & 0x003f0000) >> 4;
10813 /* offset[31:20] = insn[26]. */
10814 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10815 /* offset[18] = insn[13]. */
10816 offset |= (insn & (1 << 13)) << 5;
10817 /* offset[19] = insn[11]. */
10818 offset |= (insn & (1 << 11)) << 8;
10819
10820 /* jump to the offset */
b0109805 10821 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10822 }
10823 } else {
10824 /* Data processing immediate. */
10825 if (insn & (1 << 25)) {
10826 if (insn & (1 << 24)) {
10827 if (insn & (1 << 20))
10828 goto illegal_op;
10829 /* Bitfield/Saturate. */
10830 op = (insn >> 21) & 7;
10831 imm = insn & 0x1f;
10832 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10833 if (rn == 15) {
7d1b0095 10834 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10835 tcg_gen_movi_i32(tmp, 0);
10836 } else {
10837 tmp = load_reg(s, rn);
10838 }
9ee6e8bb
PB
10839 switch (op) {
10840 case 2: /* Signed bitfield extract. */
10841 imm++;
10842 if (shift + imm > 32)
10843 goto illegal_op;
59a71b4c
RH
10844 if (imm < 32) {
10845 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10846 }
9ee6e8bb
PB
10847 break;
10848 case 6: /* Unsigned bitfield extract. */
10849 imm++;
10850 if (shift + imm > 32)
10851 goto illegal_op;
59a71b4c
RH
10852 if (imm < 32) {
10853 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10854 }
9ee6e8bb
PB
10855 break;
10856 case 3: /* Bitfield insert/clear. */
10857 if (imm < shift)
10858 goto illegal_op;
10859 imm = imm + 1 - shift;
10860 if (imm != 32) {
6ddbc6e4 10861 tmp2 = load_reg(s, rd);
d593c48e 10862 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10863 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10864 }
10865 break;
10866 case 7:
10867 goto illegal_op;
10868 default: /* Saturate. */
9ee6e8bb
PB
10869 if (shift) {
10870 if (op & 1)
6ddbc6e4 10871 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10872 else
6ddbc6e4 10873 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10874 }
6ddbc6e4 10875 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10876 if (op & 4) {
10877 /* Unsigned. */
62b44f05
AR
10878 if ((op & 1) && shift == 0) {
10879 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10880 tcg_temp_free_i32(tmp);
10881 tcg_temp_free_i32(tmp2);
10882 goto illegal_op;
10883 }
9ef39277 10884 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10885 } else {
9ef39277 10886 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10887 }
2c0262af 10888 } else {
9ee6e8bb 10889 /* Signed. */
62b44f05
AR
10890 if ((op & 1) && shift == 0) {
10891 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10892 tcg_temp_free_i32(tmp);
10893 tcg_temp_free_i32(tmp2);
10894 goto illegal_op;
10895 }
9ef39277 10896 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10897 } else {
9ef39277 10898 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10899 }
2c0262af 10900 }
b75263d6 10901 tcg_temp_free_i32(tmp2);
9ee6e8bb 10902 break;
2c0262af 10903 }
6ddbc6e4 10904 store_reg(s, rd, tmp);
9ee6e8bb
PB
10905 } else {
10906 imm = ((insn & 0x04000000) >> 15)
10907 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10908 if (insn & (1 << 22)) {
10909 /* 16-bit immediate. */
10910 imm |= (insn >> 4) & 0xf000;
10911 if (insn & (1 << 23)) {
10912 /* movt */
5e3f878a 10913 tmp = load_reg(s, rd);
86831435 10914 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10915 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10916 } else {
9ee6e8bb 10917 /* movw */
7d1b0095 10918 tmp = tcg_temp_new_i32();
5e3f878a 10919 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10920 }
10921 } else {
9ee6e8bb
PB
10922 /* Add/sub 12-bit immediate. */
10923 if (rn == 15) {
b0109805 10924 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10925 if (insn & (1 << 23))
b0109805 10926 offset -= imm;
9ee6e8bb 10927 else
b0109805 10928 offset += imm;
7d1b0095 10929 tmp = tcg_temp_new_i32();
5e3f878a 10930 tcg_gen_movi_i32(tmp, offset);
2c0262af 10931 } else {
5e3f878a 10932 tmp = load_reg(s, rn);
9ee6e8bb 10933 if (insn & (1 << 23))
5e3f878a 10934 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10935 else
5e3f878a 10936 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10937 }
9ee6e8bb 10938 }
5e3f878a 10939 store_reg(s, rd, tmp);
191abaa2 10940 }
9ee6e8bb
PB
10941 } else {
10942 int shifter_out = 0;
10943 /* modified 12-bit immediate. */
10944 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10945 imm = (insn & 0xff);
10946 switch (shift) {
10947 case 0: /* XY */
10948 /* Nothing to do. */
10949 break;
10950 case 1: /* 00XY00XY */
10951 imm |= imm << 16;
10952 break;
10953 case 2: /* XY00XY00 */
10954 imm |= imm << 16;
10955 imm <<= 8;
10956 break;
10957 case 3: /* XYXYXYXY */
10958 imm |= imm << 16;
10959 imm |= imm << 8;
10960 break;
10961 default: /* Rotated constant. */
10962 shift = (shift << 1) | (imm >> 7);
10963 imm |= 0x80;
10964 imm = imm << (32 - shift);
10965 shifter_out = 1;
10966 break;
b5ff1b31 10967 }
7d1b0095 10968 tmp2 = tcg_temp_new_i32();
3174f8e9 10969 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10970 rn = (insn >> 16) & 0xf;
3174f8e9 10971 if (rn == 15) {
7d1b0095 10972 tmp = tcg_temp_new_i32();
3174f8e9
FN
10973 tcg_gen_movi_i32(tmp, 0);
10974 } else {
10975 tmp = load_reg(s, rn);
10976 }
9ee6e8bb
PB
10977 op = (insn >> 21) & 0xf;
10978 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10979 shifter_out, tmp, tmp2))
9ee6e8bb 10980 goto illegal_op;
7d1b0095 10981 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10982 rd = (insn >> 8) & 0xf;
10983 if (rd != 15) {
3174f8e9
FN
10984 store_reg(s, rd, tmp);
10985 } else {
7d1b0095 10986 tcg_temp_free_i32(tmp);
2c0262af 10987 }
2c0262af 10988 }
9ee6e8bb
PB
10989 }
10990 break;
10991 case 12: /* Load/store single data item. */
10992 {
10993 int postinc = 0;
10994 int writeback = 0;
a99caa48 10995 int memidx;
9bb6558a
PM
10996 ISSInfo issinfo;
10997
9ee6e8bb 10998 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10999 if (disas_neon_ls_insn(s, insn)) {
c1713132 11000 goto illegal_op;
7dcc1f89 11001 }
9ee6e8bb
PB
11002 break;
11003 }
a2fdc890
PM
11004 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11005 if (rs == 15) {
11006 if (!(insn & (1 << 20))) {
11007 goto illegal_op;
11008 }
11009 if (op != 2) {
11010 /* Byte or halfword load space with dest == r15 : memory hints.
11011 * Catch them early so we don't emit pointless addressing code.
11012 * This space is a mix of:
11013 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11014 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11015 * cores)
11016 * unallocated hints, which must be treated as NOPs
11017 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11018 * which is easiest for the decoding logic
11019 * Some space which must UNDEF
11020 */
11021 int op1 = (insn >> 23) & 3;
11022 int op2 = (insn >> 6) & 0x3f;
11023 if (op & 2) {
11024 goto illegal_op;
11025 }
11026 if (rn == 15) {
02afbf64
PM
11027 /* UNPREDICTABLE, unallocated hint or
11028 * PLD/PLDW/PLI (literal)
11029 */
2eea841c 11030 return;
a2fdc890
PM
11031 }
11032 if (op1 & 1) {
2eea841c 11033 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11034 }
11035 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11036 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11037 }
11038 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11039 goto illegal_op;
a2fdc890
PM
11040 }
11041 }
a99caa48 11042 memidx = get_mem_index(s);
9ee6e8bb 11043 if (rn == 15) {
7d1b0095 11044 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11045 /* PC relative. */
11046 /* s->pc has already been incremented by 4. */
11047 imm = s->pc & 0xfffffffc;
11048 if (insn & (1 << 23))
11049 imm += insn & 0xfff;
11050 else
11051 imm -= insn & 0xfff;
b0109805 11052 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11053 } else {
b0109805 11054 addr = load_reg(s, rn);
9ee6e8bb
PB
11055 if (insn & (1 << 23)) {
11056 /* Positive offset. */
11057 imm = insn & 0xfff;
b0109805 11058 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11059 } else {
9ee6e8bb 11060 imm = insn & 0xff;
2a0308c5
PM
11061 switch ((insn >> 8) & 0xf) {
11062 case 0x0: /* Shifted Register. */
9ee6e8bb 11063 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11064 if (shift > 3) {
11065 tcg_temp_free_i32(addr);
18c9b560 11066 goto illegal_op;
2a0308c5 11067 }
b26eefb6 11068 tmp = load_reg(s, rm);
9ee6e8bb 11069 if (shift)
b26eefb6 11070 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11071 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11072 tcg_temp_free_i32(tmp);
9ee6e8bb 11073 break;
2a0308c5 11074 case 0xc: /* Negative offset. */
b0109805 11075 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11076 break;
2a0308c5 11077 case 0xe: /* User privilege. */
b0109805 11078 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11079 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11080 break;
2a0308c5 11081 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11082 imm = -imm;
11083 /* Fall through. */
2a0308c5 11084 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11085 postinc = 1;
11086 writeback = 1;
11087 break;
2a0308c5 11088 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11089 imm = -imm;
11090 /* Fall through. */
2a0308c5 11091 case 0xf: /* Pre-increment. */
b0109805 11092 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11093 writeback = 1;
11094 break;
11095 default:
2a0308c5 11096 tcg_temp_free_i32(addr);
b7bcbe95 11097 goto illegal_op;
9ee6e8bb
PB
11098 }
11099 }
11100 }
9bb6558a
PM
11101
11102 issinfo = writeback ? ISSInvalid : rs;
11103
9ee6e8bb
PB
11104 if (insn & (1 << 20)) {
11105 /* Load. */
5a839c0d 11106 tmp = tcg_temp_new_i32();
a2fdc890 11107 switch (op) {
5a839c0d 11108 case 0:
9bb6558a 11109 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11110 break;
11111 case 4:
9bb6558a 11112 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11113 break;
11114 case 1:
9bb6558a 11115 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11116 break;
11117 case 5:
9bb6558a 11118 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11119 break;
11120 case 2:
9bb6558a 11121 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11122 break;
2a0308c5 11123 default:
5a839c0d 11124 tcg_temp_free_i32(tmp);
2a0308c5
PM
11125 tcg_temp_free_i32(addr);
11126 goto illegal_op;
a2fdc890
PM
11127 }
11128 if (rs == 15) {
3bb8a96f 11129 gen_bx_excret(s, tmp);
9ee6e8bb 11130 } else {
a2fdc890 11131 store_reg(s, rs, tmp);
9ee6e8bb
PB
11132 }
11133 } else {
11134 /* Store. */
b0109805 11135 tmp = load_reg(s, rs);
9ee6e8bb 11136 switch (op) {
5a839c0d 11137 case 0:
9bb6558a 11138 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11139 break;
11140 case 1:
9bb6558a 11141 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11142 break;
11143 case 2:
9bb6558a 11144 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11145 break;
2a0308c5 11146 default:
5a839c0d 11147 tcg_temp_free_i32(tmp);
2a0308c5
PM
11148 tcg_temp_free_i32(addr);
11149 goto illegal_op;
b7bcbe95 11150 }
5a839c0d 11151 tcg_temp_free_i32(tmp);
2c0262af 11152 }
9ee6e8bb 11153 if (postinc)
b0109805
PB
11154 tcg_gen_addi_i32(addr, addr, imm);
11155 if (writeback) {
11156 store_reg(s, rn, addr);
11157 } else {
7d1b0095 11158 tcg_temp_free_i32(addr);
b0109805 11159 }
9ee6e8bb
PB
11160 }
11161 break;
11162 default:
11163 goto illegal_op;
2c0262af 11164 }
2eea841c 11165 return;
9ee6e8bb 11166illegal_op:
2eea841c
PM
11167 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11168 default_exception_el(s));
2c0262af
FB
11169}
11170
296e5a0a 11171static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11172{
296e5a0a 11173 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11174 int32_t offset;
11175 int i;
39d5492a
PM
11176 TCGv_i32 tmp;
11177 TCGv_i32 tmp2;
11178 TCGv_i32 addr;
99c475ab 11179
99c475ab
FB
11180 switch (insn >> 12) {
11181 case 0: case 1:
396e467c 11182
99c475ab
FB
11183 rd = insn & 7;
11184 op = (insn >> 11) & 3;
11185 if (op == 3) {
11186 /* add/subtract */
11187 rn = (insn >> 3) & 7;
396e467c 11188 tmp = load_reg(s, rn);
99c475ab
FB
11189 if (insn & (1 << 10)) {
11190 /* immediate */
7d1b0095 11191 tmp2 = tcg_temp_new_i32();
396e467c 11192 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11193 } else {
11194 /* reg */
11195 rm = (insn >> 6) & 7;
396e467c 11196 tmp2 = load_reg(s, rm);
99c475ab 11197 }
9ee6e8bb
PB
11198 if (insn & (1 << 9)) {
11199 if (s->condexec_mask)
396e467c 11200 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11201 else
72485ec4 11202 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11203 } else {
11204 if (s->condexec_mask)
396e467c 11205 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11206 else
72485ec4 11207 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11208 }
7d1b0095 11209 tcg_temp_free_i32(tmp2);
396e467c 11210 store_reg(s, rd, tmp);
99c475ab
FB
11211 } else {
11212 /* shift immediate */
11213 rm = (insn >> 3) & 7;
11214 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11215 tmp = load_reg(s, rm);
11216 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11217 if (!s->condexec_mask)
11218 gen_logic_CC(tmp);
11219 store_reg(s, rd, tmp);
99c475ab
FB
11220 }
11221 break;
11222 case 2: case 3:
11223 /* arithmetic large immediate */
11224 op = (insn >> 11) & 3;
11225 rd = (insn >> 8) & 0x7;
396e467c 11226 if (op == 0) { /* mov */
7d1b0095 11227 tmp = tcg_temp_new_i32();
396e467c 11228 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11229 if (!s->condexec_mask)
396e467c
FN
11230 gen_logic_CC(tmp);
11231 store_reg(s, rd, tmp);
11232 } else {
11233 tmp = load_reg(s, rd);
7d1b0095 11234 tmp2 = tcg_temp_new_i32();
396e467c
FN
11235 tcg_gen_movi_i32(tmp2, insn & 0xff);
11236 switch (op) {
11237 case 1: /* cmp */
72485ec4 11238 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11239 tcg_temp_free_i32(tmp);
11240 tcg_temp_free_i32(tmp2);
396e467c
FN
11241 break;
11242 case 2: /* add */
11243 if (s->condexec_mask)
11244 tcg_gen_add_i32(tmp, tmp, tmp2);
11245 else
72485ec4 11246 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11247 tcg_temp_free_i32(tmp2);
396e467c
FN
11248 store_reg(s, rd, tmp);
11249 break;
11250 case 3: /* sub */
11251 if (s->condexec_mask)
11252 tcg_gen_sub_i32(tmp, tmp, tmp2);
11253 else
72485ec4 11254 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11255 tcg_temp_free_i32(tmp2);
396e467c
FN
11256 store_reg(s, rd, tmp);
11257 break;
11258 }
99c475ab 11259 }
99c475ab
FB
11260 break;
11261 case 4:
11262 if (insn & (1 << 11)) {
11263 rd = (insn >> 8) & 7;
5899f386
FB
11264 /* load pc-relative. Bit 1 of PC is ignored. */
11265 val = s->pc + 2 + ((insn & 0xff) * 4);
11266 val &= ~(uint32_t)2;
7d1b0095 11267 addr = tcg_temp_new_i32();
b0109805 11268 tcg_gen_movi_i32(addr, val);
c40c8556 11269 tmp = tcg_temp_new_i32();
9bb6558a
PM
11270 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11271 rd | ISSIs16Bit);
7d1b0095 11272 tcg_temp_free_i32(addr);
b0109805 11273 store_reg(s, rd, tmp);
99c475ab
FB
11274 break;
11275 }
11276 if (insn & (1 << 10)) {
ebfe27c5
PM
11277 /* 0b0100_01xx_xxxx_xxxx
11278 * - data processing extended, branch and exchange
11279 */
99c475ab
FB
11280 rd = (insn & 7) | ((insn >> 4) & 8);
11281 rm = (insn >> 3) & 0xf;
11282 op = (insn >> 8) & 3;
11283 switch (op) {
11284 case 0: /* add */
396e467c
FN
11285 tmp = load_reg(s, rd);
11286 tmp2 = load_reg(s, rm);
11287 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11288 tcg_temp_free_i32(tmp2);
396e467c 11289 store_reg(s, rd, tmp);
99c475ab
FB
11290 break;
11291 case 1: /* cmp */
396e467c
FN
11292 tmp = load_reg(s, rd);
11293 tmp2 = load_reg(s, rm);
72485ec4 11294 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11295 tcg_temp_free_i32(tmp2);
11296 tcg_temp_free_i32(tmp);
99c475ab
FB
11297 break;
11298 case 2: /* mov/cpy */
396e467c
FN
11299 tmp = load_reg(s, rm);
11300 store_reg(s, rd, tmp);
99c475ab 11301 break;
ebfe27c5
PM
11302 case 3:
11303 {
11304 /* 0b0100_0111_xxxx_xxxx
11305 * - branch [and link] exchange thumb register
11306 */
11307 bool link = insn & (1 << 7);
11308
fb602cb7 11309 if (insn & 3) {
ebfe27c5
PM
11310 goto undef;
11311 }
11312 if (link) {
be5e7a76 11313 ARCH(5);
ebfe27c5 11314 }
fb602cb7
PM
11315 if ((insn & 4)) {
11316 /* BXNS/BLXNS: only exists for v8M with the
11317 * security extensions, and always UNDEF if NonSecure.
11318 * We don't implement these in the user-only mode
11319 * either (in theory you can use them from Secure User
11320 * mode but they are too tied in to system emulation.)
11321 */
11322 if (!s->v8m_secure || IS_USER_ONLY) {
11323 goto undef;
11324 }
11325 if (link) {
3e3fa230 11326 gen_blxns(s, rm);
fb602cb7
PM
11327 } else {
11328 gen_bxns(s, rm);
11329 }
11330 break;
11331 }
11332 /* BLX/BX */
ebfe27c5
PM
11333 tmp = load_reg(s, rm);
11334 if (link) {
99c475ab 11335 val = (uint32_t)s->pc | 1;
7d1b0095 11336 tmp2 = tcg_temp_new_i32();
b0109805
PB
11337 tcg_gen_movi_i32(tmp2, val);
11338 store_reg(s, 14, tmp2);
3bb8a96f
PM
11339 gen_bx(s, tmp);
11340 } else {
11341 /* Only BX works as exception-return, not BLX */
11342 gen_bx_excret(s, tmp);
99c475ab 11343 }
99c475ab
FB
11344 break;
11345 }
ebfe27c5 11346 }
99c475ab
FB
11347 break;
11348 }
11349
11350 /* data processing register */
11351 rd = insn & 7;
11352 rm = (insn >> 3) & 7;
11353 op = (insn >> 6) & 0xf;
11354 if (op == 2 || op == 3 || op == 4 || op == 7) {
11355 /* the shift/rotate ops want the operands backwards */
11356 val = rm;
11357 rm = rd;
11358 rd = val;
11359 val = 1;
11360 } else {
11361 val = 0;
11362 }
11363
396e467c 11364 if (op == 9) { /* neg */
7d1b0095 11365 tmp = tcg_temp_new_i32();
396e467c
FN
11366 tcg_gen_movi_i32(tmp, 0);
11367 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11368 tmp = load_reg(s, rd);
11369 } else {
f764718d 11370 tmp = NULL;
396e467c 11371 }
99c475ab 11372
396e467c 11373 tmp2 = load_reg(s, rm);
5899f386 11374 switch (op) {
99c475ab 11375 case 0x0: /* and */
396e467c 11376 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11377 if (!s->condexec_mask)
396e467c 11378 gen_logic_CC(tmp);
99c475ab
FB
11379 break;
11380 case 0x1: /* eor */
396e467c 11381 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11382 if (!s->condexec_mask)
396e467c 11383 gen_logic_CC(tmp);
99c475ab
FB
11384 break;
11385 case 0x2: /* lsl */
9ee6e8bb 11386 if (s->condexec_mask) {
365af80e 11387 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11388 } else {
9ef39277 11389 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11390 gen_logic_CC(tmp2);
9ee6e8bb 11391 }
99c475ab
FB
11392 break;
11393 case 0x3: /* lsr */
9ee6e8bb 11394 if (s->condexec_mask) {
365af80e 11395 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11396 } else {
9ef39277 11397 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11398 gen_logic_CC(tmp2);
9ee6e8bb 11399 }
99c475ab
FB
11400 break;
11401 case 0x4: /* asr */
9ee6e8bb 11402 if (s->condexec_mask) {
365af80e 11403 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11404 } else {
9ef39277 11405 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11406 gen_logic_CC(tmp2);
9ee6e8bb 11407 }
99c475ab
FB
11408 break;
11409 case 0x5: /* adc */
49b4c31e 11410 if (s->condexec_mask) {
396e467c 11411 gen_adc(tmp, tmp2);
49b4c31e
RH
11412 } else {
11413 gen_adc_CC(tmp, tmp, tmp2);
11414 }
99c475ab
FB
11415 break;
11416 case 0x6: /* sbc */
2de68a49 11417 if (s->condexec_mask) {
396e467c 11418 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11419 } else {
11420 gen_sbc_CC(tmp, tmp, tmp2);
11421 }
99c475ab
FB
11422 break;
11423 case 0x7: /* ror */
9ee6e8bb 11424 if (s->condexec_mask) {
f669df27
AJ
11425 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11426 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11427 } else {
9ef39277 11428 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11429 gen_logic_CC(tmp2);
9ee6e8bb 11430 }
99c475ab
FB
11431 break;
11432 case 0x8: /* tst */
396e467c
FN
11433 tcg_gen_and_i32(tmp, tmp, tmp2);
11434 gen_logic_CC(tmp);
99c475ab 11435 rd = 16;
5899f386 11436 break;
99c475ab 11437 case 0x9: /* neg */
9ee6e8bb 11438 if (s->condexec_mask)
396e467c 11439 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11440 else
72485ec4 11441 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11442 break;
11443 case 0xa: /* cmp */
72485ec4 11444 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11445 rd = 16;
11446 break;
11447 case 0xb: /* cmn */
72485ec4 11448 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11449 rd = 16;
11450 break;
11451 case 0xc: /* orr */
396e467c 11452 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11453 if (!s->condexec_mask)
396e467c 11454 gen_logic_CC(tmp);
99c475ab
FB
11455 break;
11456 case 0xd: /* mul */
7b2919a0 11457 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11458 if (!s->condexec_mask)
396e467c 11459 gen_logic_CC(tmp);
99c475ab
FB
11460 break;
11461 case 0xe: /* bic */
f669df27 11462 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11463 if (!s->condexec_mask)
396e467c 11464 gen_logic_CC(tmp);
99c475ab
FB
11465 break;
11466 case 0xf: /* mvn */
396e467c 11467 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11468 if (!s->condexec_mask)
396e467c 11469 gen_logic_CC(tmp2);
99c475ab 11470 val = 1;
5899f386 11471 rm = rd;
99c475ab
FB
11472 break;
11473 }
11474 if (rd != 16) {
396e467c
FN
11475 if (val) {
11476 store_reg(s, rm, tmp2);
11477 if (op != 0xf)
7d1b0095 11478 tcg_temp_free_i32(tmp);
396e467c
FN
11479 } else {
11480 store_reg(s, rd, tmp);
7d1b0095 11481 tcg_temp_free_i32(tmp2);
396e467c
FN
11482 }
11483 } else {
7d1b0095
PM
11484 tcg_temp_free_i32(tmp);
11485 tcg_temp_free_i32(tmp2);
99c475ab
FB
11486 }
11487 break;
11488
11489 case 5:
11490 /* load/store register offset. */
11491 rd = insn & 7;
11492 rn = (insn >> 3) & 7;
11493 rm = (insn >> 6) & 7;
11494 op = (insn >> 9) & 7;
b0109805 11495 addr = load_reg(s, rn);
b26eefb6 11496 tmp = load_reg(s, rm);
b0109805 11497 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11498 tcg_temp_free_i32(tmp);
99c475ab 11499
c40c8556 11500 if (op < 3) { /* store */
b0109805 11501 tmp = load_reg(s, rd);
c40c8556
PM
11502 } else {
11503 tmp = tcg_temp_new_i32();
11504 }
99c475ab
FB
11505
11506 switch (op) {
11507 case 0: /* str */
9bb6558a 11508 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11509 break;
11510 case 1: /* strh */
9bb6558a 11511 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11512 break;
11513 case 2: /* strb */
9bb6558a 11514 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11515 break;
11516 case 3: /* ldrsb */
9bb6558a 11517 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11518 break;
11519 case 4: /* ldr */
9bb6558a 11520 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11521 break;
11522 case 5: /* ldrh */
9bb6558a 11523 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11524 break;
11525 case 6: /* ldrb */
9bb6558a 11526 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11527 break;
11528 case 7: /* ldrsh */
9bb6558a 11529 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11530 break;
11531 }
c40c8556 11532 if (op >= 3) { /* load */
b0109805 11533 store_reg(s, rd, tmp);
c40c8556
PM
11534 } else {
11535 tcg_temp_free_i32(tmp);
11536 }
7d1b0095 11537 tcg_temp_free_i32(addr);
99c475ab
FB
11538 break;
11539
11540 case 6:
11541 /* load/store word immediate offset */
11542 rd = insn & 7;
11543 rn = (insn >> 3) & 7;
b0109805 11544 addr = load_reg(s, rn);
99c475ab 11545 val = (insn >> 4) & 0x7c;
b0109805 11546 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11547
11548 if (insn & (1 << 11)) {
11549 /* load */
c40c8556 11550 tmp = tcg_temp_new_i32();
12dcc321 11551 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11552 store_reg(s, rd, tmp);
99c475ab
FB
11553 } else {
11554 /* store */
b0109805 11555 tmp = load_reg(s, rd);
12dcc321 11556 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11557 tcg_temp_free_i32(tmp);
99c475ab 11558 }
7d1b0095 11559 tcg_temp_free_i32(addr);
99c475ab
FB
11560 break;
11561
11562 case 7:
11563 /* load/store byte immediate offset */
11564 rd = insn & 7;
11565 rn = (insn >> 3) & 7;
b0109805 11566 addr = load_reg(s, rn);
99c475ab 11567 val = (insn >> 6) & 0x1f;
b0109805 11568 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11569
11570 if (insn & (1 << 11)) {
11571 /* load */
c40c8556 11572 tmp = tcg_temp_new_i32();
9bb6558a 11573 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11574 store_reg(s, rd, tmp);
99c475ab
FB
11575 } else {
11576 /* store */
b0109805 11577 tmp = load_reg(s, rd);
9bb6558a 11578 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11579 tcg_temp_free_i32(tmp);
99c475ab 11580 }
7d1b0095 11581 tcg_temp_free_i32(addr);
99c475ab
FB
11582 break;
11583
11584 case 8:
11585 /* load/store halfword immediate offset */
11586 rd = insn & 7;
11587 rn = (insn >> 3) & 7;
b0109805 11588 addr = load_reg(s, rn);
99c475ab 11589 val = (insn >> 5) & 0x3e;
b0109805 11590 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11591
11592 if (insn & (1 << 11)) {
11593 /* load */
c40c8556 11594 tmp = tcg_temp_new_i32();
9bb6558a 11595 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11596 store_reg(s, rd, tmp);
99c475ab
FB
11597 } else {
11598 /* store */
b0109805 11599 tmp = load_reg(s, rd);
9bb6558a 11600 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11601 tcg_temp_free_i32(tmp);
99c475ab 11602 }
7d1b0095 11603 tcg_temp_free_i32(addr);
99c475ab
FB
11604 break;
11605
11606 case 9:
11607 /* load/store from stack */
11608 rd = (insn >> 8) & 7;
b0109805 11609 addr = load_reg(s, 13);
99c475ab 11610 val = (insn & 0xff) * 4;
b0109805 11611 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11612
11613 if (insn & (1 << 11)) {
11614 /* load */
c40c8556 11615 tmp = tcg_temp_new_i32();
9bb6558a 11616 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11617 store_reg(s, rd, tmp);
99c475ab
FB
11618 } else {
11619 /* store */
b0109805 11620 tmp = load_reg(s, rd);
9bb6558a 11621 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11622 tcg_temp_free_i32(tmp);
99c475ab 11623 }
7d1b0095 11624 tcg_temp_free_i32(addr);
99c475ab
FB
11625 break;
11626
11627 case 10:
11628 /* add to high reg */
11629 rd = (insn >> 8) & 7;
5899f386
FB
11630 if (insn & (1 << 11)) {
11631 /* SP */
5e3f878a 11632 tmp = load_reg(s, 13);
5899f386
FB
11633 } else {
11634 /* PC. bit 1 is ignored. */
7d1b0095 11635 tmp = tcg_temp_new_i32();
5e3f878a 11636 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11637 }
99c475ab 11638 val = (insn & 0xff) * 4;
5e3f878a
PB
11639 tcg_gen_addi_i32(tmp, tmp, val);
11640 store_reg(s, rd, tmp);
99c475ab
FB
11641 break;
11642
11643 case 11:
11644 /* misc */
11645 op = (insn >> 8) & 0xf;
11646 switch (op) {
11647 case 0:
11648 /* adjust stack pointer */
b26eefb6 11649 tmp = load_reg(s, 13);
99c475ab
FB
11650 val = (insn & 0x7f) * 4;
11651 if (insn & (1 << 7))
6a0d8a1d 11652 val = -(int32_t)val;
b26eefb6
PB
11653 tcg_gen_addi_i32(tmp, tmp, val);
11654 store_reg(s, 13, tmp);
99c475ab
FB
11655 break;
11656
9ee6e8bb
PB
11657 case 2: /* sign/zero extend. */
11658 ARCH(6);
11659 rd = insn & 7;
11660 rm = (insn >> 3) & 7;
b0109805 11661 tmp = load_reg(s, rm);
9ee6e8bb 11662 switch ((insn >> 6) & 3) {
b0109805
PB
11663 case 0: gen_sxth(tmp); break;
11664 case 1: gen_sxtb(tmp); break;
11665 case 2: gen_uxth(tmp); break;
11666 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11667 }
b0109805 11668 store_reg(s, rd, tmp);
9ee6e8bb 11669 break;
99c475ab
FB
11670 case 4: case 5: case 0xc: case 0xd:
11671 /* push/pop */
b0109805 11672 addr = load_reg(s, 13);
5899f386
FB
11673 if (insn & (1 << 8))
11674 offset = 4;
99c475ab 11675 else
5899f386
FB
11676 offset = 0;
11677 for (i = 0; i < 8; i++) {
11678 if (insn & (1 << i))
11679 offset += 4;
11680 }
11681 if ((insn & (1 << 11)) == 0) {
b0109805 11682 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11683 }
99c475ab
FB
11684 for (i = 0; i < 8; i++) {
11685 if (insn & (1 << i)) {
11686 if (insn & (1 << 11)) {
11687 /* pop */
c40c8556 11688 tmp = tcg_temp_new_i32();
12dcc321 11689 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11690 store_reg(s, i, tmp);
99c475ab
FB
11691 } else {
11692 /* push */
b0109805 11693 tmp = load_reg(s, i);
12dcc321 11694 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11695 tcg_temp_free_i32(tmp);
99c475ab 11696 }
5899f386 11697 /* advance to the next address. */
b0109805 11698 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11699 }
11700 }
f764718d 11701 tmp = NULL;
99c475ab
FB
11702 if (insn & (1 << 8)) {
11703 if (insn & (1 << 11)) {
11704 /* pop pc */
c40c8556 11705 tmp = tcg_temp_new_i32();
12dcc321 11706 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11707 /* don't set the pc until the rest of the instruction
11708 has completed */
11709 } else {
11710 /* push lr */
b0109805 11711 tmp = load_reg(s, 14);
12dcc321 11712 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11713 tcg_temp_free_i32(tmp);
99c475ab 11714 }
b0109805 11715 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11716 }
5899f386 11717 if ((insn & (1 << 11)) == 0) {
b0109805 11718 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11719 }
99c475ab 11720 /* write back the new stack pointer */
b0109805 11721 store_reg(s, 13, addr);
99c475ab 11722 /* set the new PC value */
be5e7a76 11723 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11724 store_reg_from_load(s, 15, tmp);
be5e7a76 11725 }
99c475ab
FB
11726 break;
11727
9ee6e8bb
PB
11728 case 1: case 3: case 9: case 11: /* czb */
11729 rm = insn & 7;
d9ba4830 11730 tmp = load_reg(s, rm);
9ee6e8bb
PB
11731 s->condlabel = gen_new_label();
11732 s->condjmp = 1;
11733 if (insn & (1 << 11))
cb63669a 11734 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11735 else
cb63669a 11736 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11737 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11738 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11739 val = (uint32_t)s->pc + 2;
11740 val += offset;
11741 gen_jmp(s, val);
11742 break;
11743
11744 case 15: /* IT, nop-hint. */
11745 if ((insn & 0xf) == 0) {
11746 gen_nop_hint(s, (insn >> 4) & 0xf);
11747 break;
11748 }
11749 /* If Then. */
11750 s->condexec_cond = (insn >> 4) & 0xe;
11751 s->condexec_mask = insn & 0x1f;
11752 /* No actual code generated for this insn, just setup state. */
11753 break;
11754
06c949e6 11755 case 0xe: /* bkpt */
d4a2dc67
PM
11756 {
11757 int imm8 = extract32(insn, 0, 8);
be5e7a76 11758 ARCH(5);
73710361
GB
11759 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11760 default_exception_el(s));
06c949e6 11761 break;
d4a2dc67 11762 }
06c949e6 11763
19a6e31c
PM
11764 case 0xa: /* rev, and hlt */
11765 {
11766 int op1 = extract32(insn, 6, 2);
11767
11768 if (op1 == 2) {
11769 /* HLT */
11770 int imm6 = extract32(insn, 0, 6);
11771
11772 gen_hlt(s, imm6);
11773 break;
11774 }
11775
11776 /* Otherwise this is rev */
9ee6e8bb
PB
11777 ARCH(6);
11778 rn = (insn >> 3) & 0x7;
11779 rd = insn & 0x7;
b0109805 11780 tmp = load_reg(s, rn);
19a6e31c 11781 switch (op1) {
66896cb8 11782 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11783 case 1: gen_rev16(tmp); break;
11784 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11785 default:
11786 g_assert_not_reached();
9ee6e8bb 11787 }
b0109805 11788 store_reg(s, rd, tmp);
9ee6e8bb 11789 break;
19a6e31c 11790 }
9ee6e8bb 11791
d9e028c1
PM
11792 case 6:
11793 switch ((insn >> 5) & 7) {
11794 case 2:
11795 /* setend */
11796 ARCH(6);
9886ecdf
PB
11797 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11798 gen_helper_setend(cpu_env);
dcba3a8d 11799 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11800 }
9ee6e8bb 11801 break;
d9e028c1
PM
11802 case 3:
11803 /* cps */
11804 ARCH(6);
11805 if (IS_USER(s)) {
11806 break;
8984bd2e 11807 }
b53d8923 11808 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11809 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11810 /* FAULTMASK */
11811 if (insn & 1) {
11812 addr = tcg_const_i32(19);
11813 gen_helper_v7m_msr(cpu_env, addr, tmp);
11814 tcg_temp_free_i32(addr);
11815 }
11816 /* PRIMASK */
11817 if (insn & 2) {
11818 addr = tcg_const_i32(16);
11819 gen_helper_v7m_msr(cpu_env, addr, tmp);
11820 tcg_temp_free_i32(addr);
11821 }
11822 tcg_temp_free_i32(tmp);
11823 gen_lookup_tb(s);
11824 } else {
11825 if (insn & (1 << 4)) {
11826 shift = CPSR_A | CPSR_I | CPSR_F;
11827 } else {
11828 shift = 0;
11829 }
11830 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11831 }
d9e028c1
PM
11832 break;
11833 default:
11834 goto undef;
9ee6e8bb
PB
11835 }
11836 break;
11837
99c475ab
FB
11838 default:
11839 goto undef;
11840 }
11841 break;
11842
11843 case 12:
a7d3970d 11844 {
99c475ab 11845 /* load/store multiple */
f764718d 11846 TCGv_i32 loaded_var = NULL;
99c475ab 11847 rn = (insn >> 8) & 0x7;
b0109805 11848 addr = load_reg(s, rn);
99c475ab
FB
11849 for (i = 0; i < 8; i++) {
11850 if (insn & (1 << i)) {
99c475ab
FB
11851 if (insn & (1 << 11)) {
11852 /* load */
c40c8556 11853 tmp = tcg_temp_new_i32();
12dcc321 11854 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11855 if (i == rn) {
11856 loaded_var = tmp;
11857 } else {
11858 store_reg(s, i, tmp);
11859 }
99c475ab
FB
11860 } else {
11861 /* store */
b0109805 11862 tmp = load_reg(s, i);
12dcc321 11863 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11864 tcg_temp_free_i32(tmp);
99c475ab 11865 }
5899f386 11866 /* advance to the next address */
b0109805 11867 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11868 }
11869 }
b0109805 11870 if ((insn & (1 << rn)) == 0) {
a7d3970d 11871 /* base reg not in list: base register writeback */
b0109805
PB
11872 store_reg(s, rn, addr);
11873 } else {
a7d3970d
PM
11874 /* base reg in list: if load, complete it now */
11875 if (insn & (1 << 11)) {
11876 store_reg(s, rn, loaded_var);
11877 }
7d1b0095 11878 tcg_temp_free_i32(addr);
b0109805 11879 }
99c475ab 11880 break;
a7d3970d 11881 }
99c475ab
FB
11882 case 13:
11883 /* conditional branch or swi */
11884 cond = (insn >> 8) & 0xf;
11885 if (cond == 0xe)
11886 goto undef;
11887
11888 if (cond == 0xf) {
11889 /* swi */
eaed129d 11890 gen_set_pc_im(s, s->pc);
d4a2dc67 11891 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11892 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11893 break;
11894 }
11895 /* generate a conditional jump to next instruction */
e50e6a20 11896 s->condlabel = gen_new_label();
39fb730a 11897 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11898 s->condjmp = 1;
99c475ab
FB
11899
11900 /* jump to the offset */
5899f386 11901 val = (uint32_t)s->pc + 2;
99c475ab 11902 offset = ((int32_t)insn << 24) >> 24;
5899f386 11903 val += offset << 1;
8aaca4c0 11904 gen_jmp(s, val);
99c475ab
FB
11905 break;
11906
11907 case 14:
358bf29e 11908 if (insn & (1 << 11)) {
296e5a0a
PM
11909 /* thumb_insn_is_16bit() ensures we can't get here for
11910 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11911 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11912 */
11913 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11914 ARCH(5);
11915 offset = ((insn & 0x7ff) << 1);
11916 tmp = load_reg(s, 14);
11917 tcg_gen_addi_i32(tmp, tmp, offset);
11918 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11919
11920 tmp2 = tcg_temp_new_i32();
11921 tcg_gen_movi_i32(tmp2, s->pc | 1);
11922 store_reg(s, 14, tmp2);
11923 gen_bx(s, tmp);
358bf29e
PB
11924 break;
11925 }
9ee6e8bb 11926 /* unconditional branch */
99c475ab
FB
11927 val = (uint32_t)s->pc;
11928 offset = ((int32_t)insn << 21) >> 21;
11929 val += (offset << 1) + 2;
8aaca4c0 11930 gen_jmp(s, val);
99c475ab
FB
11931 break;
11932
11933 case 15:
296e5a0a
PM
11934 /* thumb_insn_is_16bit() ensures we can't get here for
11935 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11936 */
11937 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11938
11939 if (insn & (1 << 11)) {
11940 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11941 offset = ((insn & 0x7ff) << 1) | 1;
11942 tmp = load_reg(s, 14);
11943 tcg_gen_addi_i32(tmp, tmp, offset);
11944
11945 tmp2 = tcg_temp_new_i32();
11946 tcg_gen_movi_i32(tmp2, s->pc | 1);
11947 store_reg(s, 14, tmp2);
11948 gen_bx(s, tmp);
11949 } else {
11950 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11951 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11952
11953 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
11954 }
9ee6e8bb 11955 break;
99c475ab
FB
11956 }
11957 return;
9ee6e8bb 11958illegal_op:
99c475ab 11959undef:
73710361
GB
11960 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11961 default_exception_el(s));
99c475ab
FB
11962}
11963
541ebcd4
PM
11964static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11965{
11966 /* Return true if the insn at dc->pc might cross a page boundary.
11967 * (False positives are OK, false negatives are not.)
5b8d7289
PM
11968 * We know this is a Thumb insn, and our caller ensures we are
11969 * only called if dc->pc is less than 4 bytes from the page
11970 * boundary, so we cross the page if the first 16 bits indicate
11971 * that this is a 32 bit insn.
541ebcd4 11972 */
5b8d7289 11973 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 11974
5b8d7289 11975 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
11976}
11977
1d8a5535
LV
11978static int arm_tr_init_disas_context(DisasContextBase *dcbase,
11979 CPUState *cs, int max_insns)
2c0262af 11980{
1d8a5535 11981 DisasContext *dc = container_of(dcbase, DisasContext, base);
9c489ea6 11982 CPUARMState *env = cs->env_ptr;
4e5e1215 11983 ARMCPU *cpu = arm_env_get_cpu(env);
3b46e624 11984
dcba3a8d 11985 dc->pc = dc->base.pc_first;
e50e6a20 11986 dc->condjmp = 0;
3926cc84 11987
40f860cd 11988 dc->aarch64 = 0;
cef9ee70
SS
11989 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11990 * there is no secure EL1, so we route exceptions to EL3.
11991 */
11992 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11993 !arm_el_is_aa64(env, 3);
1d8a5535
LV
11994 dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
11995 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
11996 dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
11997 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
11998 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
11999 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
c1e37810 12000 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 12001#if !defined(CONFIG_USER_ONLY)
c1e37810 12002 dc->user = (dc->current_el == 0);
3926cc84 12003#endif
1d8a5535
LV
12004 dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
12005 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
12006 dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
12007 dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
12008 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
12009 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
12010 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
fb602cb7
PM
12011 dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12012 regime_is_secure(env, dc->mmu_idx);
60322b39 12013 dc->cp_regs = cpu->cp_regs;
a984e42c 12014 dc->features = env->features;
40f860cd 12015
50225ad0
PM
12016 /* Single step state. The code-generation logic here is:
12017 * SS_ACTIVE == 0:
12018 * generate code with no special handling for single-stepping (except
12019 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
12020 * this happens anyway because those changes are all system register or
12021 * PSTATE writes).
12022 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
12023 * emit code for one insn
12024 * emit code to clear PSTATE.SS
12025 * emit code to generate software step exception for completed step
12026 * end TB (as usual for having generated an exception)
12027 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
12028 * emit code to generate a software step exception
12029 * end the TB
12030 */
1d8a5535
LV
12031 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
12032 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
50225ad0
PM
12033 dc->is_ldex = false;
12034 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
12035
13189a90
LV
12036 dc->next_page_start =
12037 (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1d8a5535 12038
f7708456
RH
12039 /* If architectural single step active, limit to 1. */
12040 if (is_singlestepping(dc)) {
12041 max_insns = 1;
12042 }
12043
d0264d86
RH
12044 /* ARM is a fixed-length ISA. Bound the number of insns to execute
12045 to those left on the page. */
12046 if (!dc->thumb) {
12047 int bound = (dc->next_page_start - dc->base.pc_first) / 4;
12048 max_insns = MIN(max_insns, bound);
12049 }
12050
a7812ae4
PB
12051 cpu_F0s = tcg_temp_new_i32();
12052 cpu_F1s = tcg_temp_new_i32();
12053 cpu_F0d = tcg_temp_new_i64();
12054 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
12055 cpu_V0 = cpu_F0d;
12056 cpu_V1 = cpu_F1d;
e677137d 12057 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 12058 cpu_M0 = tcg_temp_new_i64();
1d8a5535
LV
12059
12060 return max_insns;
12061}
12062
b1476854
LV
12063static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12064{
12065 DisasContext *dc = container_of(dcbase, DisasContext, base);
12066
12067 /* A note on handling of the condexec (IT) bits:
12068 *
12069 * We want to avoid the overhead of having to write the updated condexec
12070 * bits back to the CPUARMState for every instruction in an IT block. So:
12071 * (1) if the condexec bits are not already zero then we write
12072 * zero back into the CPUARMState now. This avoids complications trying
12073 * to do it at the end of the block. (For example if we don't do this
12074 * it's hard to identify whether we can safely skip writing condexec
12075 * at the end of the TB, which we definitely want to do for the case
12076 * where a TB doesn't do anything with the IT state at all.)
12077 * (2) if we are going to leave the TB then we call gen_set_condexec()
12078 * which will write the correct value into CPUARMState if zero is wrong.
12079 * This is done both for leaving the TB at the end, and for leaving
12080 * it because of an exception we know will happen, which is done in
12081 * gen_exception_insn(). The latter is necessary because we need to
12082 * leave the TB with the PC/IT state just prior to execution of the
12083 * instruction which caused the exception.
12084 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12085 * then the CPUARMState will be wrong and we need to reset it.
12086 * This is handled in the same way as restoration of the
12087 * PC in these situations; we save the value of the condexec bits
12088 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12089 * then uses this to restore them after an exception.
12090 *
12091 * Note that there are no instructions which can read the condexec
12092 * bits, and none which can write non-static values to them, so
12093 * we don't need to care about whether CPUARMState is correct in the
12094 * middle of a TB.
12095 */
12096
12097 /* Reset the conditional execution bits immediately. This avoids
12098 complications trying to do it at the end of the block. */
12099 if (dc->condexec_mask || dc->condexec_cond) {
12100 TCGv_i32 tmp = tcg_temp_new_i32();
12101 tcg_gen_movi_i32(tmp, 0);
12102 store_cpu_field(tmp, condexec_bits);
12103 }
23169224 12104 tcg_clear_temp_count();
b1476854
LV
12105}
12106
f62bd897
LV
12107static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12108{
12109 DisasContext *dc = container_of(dcbase, DisasContext, base);
12110
f62bd897
LV
12111 tcg_gen_insn_start(dc->pc,
12112 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12113 0);
15fa08f8 12114 dc->insn_start = tcg_last_op();
f62bd897
LV
12115}
12116
a68956ad
LV
12117static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12118 const CPUBreakpoint *bp)
12119{
12120 DisasContext *dc = container_of(dcbase, DisasContext, base);
12121
12122 if (bp->flags & BP_CPU) {
12123 gen_set_condexec(dc);
12124 gen_set_pc_im(dc, dc->pc);
12125 gen_helper_check_breakpoints(cpu_env);
12126 /* End the TB early; it's likely not going to be executed */
12127 dc->base.is_jmp = DISAS_TOO_MANY;
12128 } else {
12129 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12130 /* The address covered by the breakpoint must be
12131 included in [tb->pc, tb->pc + tb->size) in order
12132 to for it to be properly cleared -- thus we
12133 increment the PC here so that the logic setting
12134 tb->size below does the right thing. */
12135 /* TODO: Advance PC by correct instruction length to
12136 * avoid disassembler error messages */
12137 dc->pc += 2;
12138 dc->base.is_jmp = DISAS_NORETURN;
12139 }
12140
12141 return true;
12142}
12143
/* Common per-insn preamble shared by the A32 and Thumb translators.
 * Returns true if an exception has already been generated for this insn
 * (so the caller should return without translating anything), false to
 * proceed with normal translation.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12177
/* Common per-insn epilogue shared by the A32 and Thumb translators:
 * closes a pending "condition failed" branch target and keeps the
 * translator core's next-PC in sync with our dc->pc.
 */
static void arm_post_translate_insn(DisasContext *dc)
{
    /* If the insn was conditional and the TB is continuing, this is where
     * the condition-failed path rejoins the straight-line code.
     */
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
12187
722ef0a5
RH
12188static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12189{
12190 DisasContext *dc = container_of(dcbase, DisasContext, base);
12191 CPUARMState *env = cpu->env_ptr;
12192 unsigned int insn;
12193
12194 if (arm_pre_translate_insn(dc)) {
12195 return;
12196 }
12197
12198 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12199 dc->insn = insn;
722ef0a5
RH
12200 dc->pc += 4;
12201 disas_arm_insn(dc, insn);
12202
d0264d86
RH
12203 arm_post_translate_insn(dc);
12204
12205 /* ARM is a fixed-length ISA. We performed the cross-page check
12206 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12207}
12208
dcf14dfb
PM
12209static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12210{
12211 /* Return true if this Thumb insn is always unconditional,
12212 * even inside an IT block. This is true of only a very few
12213 * instructions: BKPT, HLT, and SG.
12214 *
12215 * A larger class of instructions are UNPREDICTABLE if used
12216 * inside an IT block; we do not need to detect those here, because
12217 * what we do by default (perform the cc check and update the IT
12218 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12219 * choice for those situations.
12220 *
12221 * insn is either a 16-bit or a 32-bit instruction; the two are
12222 * distinguishable because for the 16-bit case the top 16 bits
12223 * are zeroes, and that isn't a valid 32-bit encoding.
12224 */
12225 if ((insn & 0xffffff00) == 0xbe00) {
12226 /* BKPT */
12227 return true;
12228 }
12229
12230 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12231 !arm_dc_feature(s, ARM_FEATURE_M)) {
12232 /* HLT: v8A only. This is unconditional even when it is going to
12233 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12234 * For v7 cores this was a plain old undefined encoding and so
12235 * honours its cc check. (We might be using the encoding as
12236 * a semihosting trap, but we don't change the cc check behaviour
12237 * on that account, because a debugger connected to a real v7A
12238 * core and emulating semihosting traps by catching the UNDEF
12239 * exception would also only see cases where the cc check passed.
12240 * No guest code should be trying to do a HLT semihosting trap
12241 * in an IT block anyway.
12242 */
12243 return true;
12244 }
12245
12246 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12247 arm_dc_feature(s, ARM_FEATURE_M)) {
12248 /* SG: v8M only */
12249 return true;
12250 }
12251
12252 return false;
12253}
12254
722ef0a5
RH
/* Translate one Thumb instruction.  Fetches 16 or 32 bits, emits the
 * conditional skip for an IT block if needed, dispatches to the 16-bit
 * or 32-bit decoder, then advances the IT state machine.  The ordering
 * of the PC advances and the condexec update is significant: dc->pc must
 * already point past the insn before decode, and the IT state advances
 * only after decode.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; a 32-bit encoding needs a second fetch. */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        /* First halfword goes in the high 16 bits of the combined insn. */
        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit a conditional branch over this insn unless
     * the insn is one of the few that execute unconditionally even there.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            /* Branch to condlabel if the *inverse* condition holds. */
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        /* Mask exhausted: the IT block is finished. */
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next.  This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12325
/* Emit the end-of-TB code: depending on why translation stopped, either
 * special exception-return handling, single-step exception generation, or
 * the normal goto-TB / exit-TB / helper-call epilogues.  Also emits the
 * "condition failed" codepath for a trailing conditional branch/trap.
 * NB: in both switches some cases are deliberately placed *after* the
 * default label; they are still matched normally and are not part of any
 * fall-through chain.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    /* Write back the real IT state if we zeroed it at tb_start. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            /* Step completed before taking the trap exception. */
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            /* PC must be up to date before the step exception. */
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            /* Exception already generated; nothing more to emit. */
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Direct chain to the TB at dc->pc. */
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            /* PC already written; use the indirect TB-lookup exit. */
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Argument is this insn's length in bytes: 2 for a 16-bit
             * Thumb encoding (top bit of insn clear), else 4.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12448
4013f7fc
LV
12449static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12450{
12451 DisasContext *dc = container_of(dcbase, DisasContext, base);
12452
12453 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12454 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12455}
12456
23169224
LV
/* Translator hooks for A32 (ARM) code, driven by translator_loop(). */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12466
722ef0a5
RH
/* Translator hooks for Thumb code; only translate_insn differs from the
 * A32 ops vector.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12476
70d3c035 12477/* generate intermediate code for basic block 'tb'. */
23169224 12478void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12479{
23169224
LV
12480 DisasContext dc;
12481 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12482
722ef0a5
RH
12483 if (ARM_TBFLAG_THUMB(tb->flags)) {
12484 ops = &thumb_translator_ops;
12485 }
23169224 12486#ifdef TARGET_AARCH64
70d3c035 12487 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12488 ops = &aarch64_translator_ops;
2c0262af
FB
12489 }
12490#endif
23169224
LV
12491
12492 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12493}
12494
/* AArch32 mode names indexed by CPSR.M[3:0]; "???" marks encodings with
 * no defined mode.  Used by arm_cpu_dump_state() below.
 */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
  "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12499
878096ee
AF
12500void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12501 int flags)
2c0262af 12502{
878096ee
AF
12503 ARMCPU *cpu = ARM_CPU(cs);
12504 CPUARMState *env = &cpu->env;
2c0262af
FB
12505 int i;
12506
17731115
PM
12507 if (is_a64(env)) {
12508 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12509 return;
12510 }
12511
2c0262af 12512 for(i=0;i<16;i++) {
7fe48483 12513 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12514 if ((i % 4) == 3)
7fe48483 12515 cpu_fprintf(f, "\n");
2c0262af 12516 else
7fe48483 12517 cpu_fprintf(f, " ");
2c0262af 12518 }
06e5cf7a 12519
5b906f35
PM
12520 if (arm_feature(env, ARM_FEATURE_M)) {
12521 uint32_t xpsr = xpsr_read(env);
12522 const char *mode;
1e577cc7
PM
12523 const char *ns_status = "";
12524
12525 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12526 ns_status = env->v7m.secure ? "S " : "NS ";
12527 }
5b906f35
PM
12528
12529 if (xpsr & XPSR_EXCP) {
12530 mode = "handler";
12531 } else {
8bfc26ea 12532 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
12533 mode = "unpriv-thread";
12534 } else {
12535 mode = "priv-thread";
12536 }
12537 }
12538
1e577cc7 12539 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
5b906f35
PM
12540 xpsr,
12541 xpsr & XPSR_N ? 'N' : '-',
12542 xpsr & XPSR_Z ? 'Z' : '-',
12543 xpsr & XPSR_C ? 'C' : '-',
12544 xpsr & XPSR_V ? 'V' : '-',
12545 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 12546 ns_status,
5b906f35 12547 mode);
06e5cf7a 12548 } else {
5b906f35
PM
12549 uint32_t psr = cpsr_read(env);
12550 const char *ns_status = "";
12551
12552 if (arm_feature(env, ARM_FEATURE_EL3) &&
12553 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12554 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12555 }
12556
12557 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12558 psr,
12559 psr & CPSR_N ? 'N' : '-',
12560 psr & CPSR_Z ? 'Z' : '-',
12561 psr & CPSR_C ? 'C' : '-',
12562 psr & CPSR_V ? 'V' : '-',
12563 psr & CPSR_T ? 'T' : 'A',
12564 ns_status,
12565 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12566 }
b7bcbe95 12567
f2617cfc
PM
12568 if (flags & CPU_DUMP_FPU) {
12569 int numvfpregs = 0;
12570 if (arm_feature(env, ARM_FEATURE_VFP)) {
12571 numvfpregs += 16;
12572 }
12573 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12574 numvfpregs += 16;
12575 }
12576 for (i = 0; i < numvfpregs; i++) {
9a2b5256 12577 uint64_t v = *aa32_vfp_dreg(env, i);
f2617cfc
PM
12578 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12579 i * 2, (uint32_t)v,
12580 i * 2 + 1, (uint32_t)(v >> 32),
12581 i, v);
12582 }
12583 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12584 }
2c0262af 12585}
a6b025d3 12586
bad729e2
RH
12587void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12588 target_ulong *data)
d2856f1a 12589{
3926cc84 12590 if (is_a64(env)) {
bad729e2 12591 env->pc = data[0];
40f860cd 12592 env->condexec_bits = 0;
aaa1f954 12593 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12594 } else {
bad729e2
RH
12595 env->regs[15] = data[0];
12596 env->condexec_bits = data[1];
aaa1f954 12597 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12598 }
d2856f1a 12599}
This page took 3.685043 seconds and 4 git commands to generate.