]> Git Repo - qemu.git/blame - target/arm/translate.c
Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20171109' into staging
[qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
/* Architecture-level gates: each expands to a feature test on the
 * DisasContext 's' that is in scope at every use site.
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    arm_dc_feature(s, ARM_FEATURE_JAZELLE)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Branch to the decoder's illegal_op label unless arch level x is present. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
/* Under user-mode emulation every access is a user access. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
60
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
78bcaa3e
RH
64TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65TCGv_i64 cpu_exclusive_addr;
66TCGv_i64 cpu_exclusive_val;
ad69471c 67
b26eefb6 68/* FIXME: These should be removed. */
39d5492a 69static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 70static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 71
022c62cb 72#include "exec/gen-icount.h"
2e70f6ef 73
155c3eac
FN
74static const char *regnames[] =
75 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
76 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
77
b26eefb6
PB
78/* initialize TCG globals. */
79void arm_translate_init(void)
80{
155c3eac
FN
81 int i;
82
155c3eac 83 for (i = 0; i < 16; i++) {
e1ccc054 84 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 85 offsetof(CPUARMState, regs[i]),
155c3eac
FN
86 regnames[i]);
87 }
e1ccc054
RH
88 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
89 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
90 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
91 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 92
e1ccc054 93 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 94 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 95 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 96 offsetof(CPUARMState, exclusive_val), "exclusive_val");
155c3eac 97
14ade10f 98 a64_translate_init();
b26eefb6
PB
99}
100
9bb6558a
PM
101/* Flags for the disas_set_da_iss info argument:
102 * lower bits hold the Rt register number, higher bits are flags.
103 */
104typedef enum ISSInfo {
105 ISSNone = 0,
106 ISSRegMask = 0x1f,
107 ISSInvalid = (1 << 5),
108 ISSIsAcqRel = (1 << 6),
109 ISSIsWrite = (1 << 7),
110 ISSIs16Bit = (1 << 8),
111} ISSInfo;
112
113/* Save the syndrome information for a Data Abort */
114static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
115{
116 uint32_t syn;
117 int sas = memop & MO_SIZE;
118 bool sse = memop & MO_SIGN;
119 bool is_acqrel = issinfo & ISSIsAcqRel;
120 bool is_write = issinfo & ISSIsWrite;
121 bool is_16bit = issinfo & ISSIs16Bit;
122 int srt = issinfo & ISSRegMask;
123
124 if (issinfo & ISSInvalid) {
125 /* Some callsites want to conditionally provide ISS info,
126 * eg "only if this was not a writeback"
127 */
128 return;
129 }
130
131 if (srt == 15) {
132 /* For AArch32, insns where the src/dest is R15 never generate
133 * ISS information. Catching that here saves checking at all
134 * the call sites.
135 */
136 return;
137 }
138
139 syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
140 0, 0, 0, is_write, 0, is_16bit);
141 disas_set_insn_syndrome(s, syn);
142}
143
8bd5c820 144static inline int get_a32_user_mem_index(DisasContext *s)
579d21cc 145{
8bd5c820 146 /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
579d21cc
PM
147 * insns:
148 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
149 * otherwise, access as if at PL0.
150 */
151 switch (s->mmu_idx) {
152 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
153 case ARMMMUIdx_S12NSE0:
154 case ARMMMUIdx_S12NSE1:
8bd5c820 155 return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
579d21cc
PM
156 case ARMMMUIdx_S1E3:
157 case ARMMMUIdx_S1SE0:
158 case ARMMMUIdx_S1SE1:
8bd5c820 159 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
e7b921c2
PM
160 case ARMMMUIdx_MUser:
161 case ARMMMUIdx_MPriv:
3bef7012 162 case ARMMMUIdx_MNegPri:
e7b921c2 163 return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
b9f587d6
PM
164 case ARMMMUIdx_MSUser:
165 case ARMMMUIdx_MSPriv:
166 case ARMMMUIdx_MSNegPri:
167 return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
579d21cc
PM
168 case ARMMMUIdx_S2NS:
169 default:
170 g_assert_not_reached();
171 }
172}
173
39d5492a 174static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 175{
39d5492a 176 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
177 tcg_gen_ld_i32(tmp, cpu_env, offset);
178 return tmp;
179}
180
0ecb72a5 181#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 182
39d5492a 183static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
184{
185 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 186 tcg_temp_free_i32(var);
d9ba4830
PB
187}
188
189#define store_cpu_field(var, name) \
0ecb72a5 190 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 191
b26eefb6 192/* Set a variable to the value of a CPU register. */
39d5492a 193static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
194{
195 if (reg == 15) {
196 uint32_t addr;
b90372ad 197 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
198 if (s->thumb)
199 addr = (long)s->pc + 2;
200 else
201 addr = (long)s->pc + 4;
202 tcg_gen_movi_i32(var, addr);
203 } else {
155c3eac 204 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
205 }
206}
207
208/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 209static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 210{
39d5492a 211 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
212 load_reg_var(s, tmp, reg);
213 return tmp;
214}
215
216/* Set a CPU register. The source must be a temporary and will be
217 marked as dead. */
39d5492a 218static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
219{
220 if (reg == 15) {
9b6a3ea7
PM
221 /* In Thumb mode, we must ignore bit 0.
222 * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
223 * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
224 * We choose to ignore [1:0] in ARM mode for all architecture versions.
225 */
226 tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
dcba3a8d 227 s->base.is_jmp = DISAS_JUMP;
b26eefb6 228 }
155c3eac 229 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 230 tcg_temp_free_i32(var);
b26eefb6
PB
231}
232
/* Value extensions. */
#define gen_uxtb(var)   tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var)   tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var)   tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var)   tcg_gen_ext16s_i32(var, var)

/* Dual-halfword extensions go via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
b26eefb6 242
39d5492a 243static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 244{
39d5492a 245 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 246 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
247 tcg_temp_free_i32(tmp_mask);
248}
d9ba4830
PB
249/* Set NZCV flags from the high 4 bits of var. */
250#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
251
d4a2dc67 252static void gen_exception_internal(int excp)
d9ba4830 253{
d4a2dc67
PM
254 TCGv_i32 tcg_excp = tcg_const_i32(excp);
255
256 assert(excp_is_internal(excp));
257 gen_helper_exception_internal(cpu_env, tcg_excp);
258 tcg_temp_free_i32(tcg_excp);
259}
260
73710361 261static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
262{
263 TCGv_i32 tcg_excp = tcg_const_i32(excp);
264 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 265 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 266
73710361
GB
267 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
268 tcg_syn, tcg_el);
269
270 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
271 tcg_temp_free_i32(tcg_syn);
272 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
273}
274
50225ad0
PM
275static void gen_ss_advance(DisasContext *s)
276{
277 /* If the singlestep state is Active-not-pending, advance to
278 * Active-pending.
279 */
280 if (s->ss_active) {
281 s->pstate_ss = 0;
282 gen_helper_clear_pstate_ss(cpu_env);
283 }
284}
285
286static void gen_step_complete_exception(DisasContext *s)
287{
288 /* We just completed step of an insn. Move from Active-not-pending
289 * to Active-pending, and then also take the swstep exception.
290 * This corresponds to making the (IMPDEF) choice to prioritize
291 * swstep exceptions over asynchronous exceptions taken to an exception
292 * level where debug is disabled. This choice has the advantage that
293 * we do not need to maintain internal state corresponding to the
294 * ISV/EX syndrome bits between completion of the step and generation
295 * of the exception, and our syndrome information is always correct.
296 */
297 gen_ss_advance(s);
73710361
GB
298 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
299 default_exception_el(s));
dcba3a8d 300 s->base.is_jmp = DISAS_NORETURN;
50225ad0
PM
301}
302
5425415e
PM
303static void gen_singlestep_exception(DisasContext *s)
304{
305 /* Generate the right kind of exception for singlestep, which is
306 * either the architectural singlestep or EXCP_DEBUG for QEMU's
307 * gdb singlestepping.
308 */
309 if (s->ss_active) {
310 gen_step_complete_exception(s);
311 } else {
312 gen_exception_internal(EXCP_DEBUG);
313 }
314}
315
b636649f
PM
316static inline bool is_singlestepping(DisasContext *s)
317{
318 /* Return true if we are singlestepping either because of
319 * architectural singlestep or QEMU gdbstub singlestep. This does
320 * not include the command line '-singlestep' mode which is rather
321 * misnamed as it only means "one instruction per TB" and doesn't
322 * affect the code we generate.
323 */
dcba3a8d 324 return s->base.singlestep_enabled || s->ss_active;
b636649f
PM
325}
326
39d5492a 327static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 328{
39d5492a
PM
329 TCGv_i32 tmp1 = tcg_temp_new_i32();
330 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
331 tcg_gen_ext16s_i32(tmp1, a);
332 tcg_gen_ext16s_i32(tmp2, b);
3670669c 333 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 334 tcg_temp_free_i32(tmp2);
3670669c
PB
335 tcg_gen_sari_i32(a, a, 16);
336 tcg_gen_sari_i32(b, b, 16);
337 tcg_gen_mul_i32(b, b, a);
338 tcg_gen_mov_i32(a, tmp1);
7d1b0095 339 tcg_temp_free_i32(tmp1);
3670669c
PB
340}
341
342/* Byteswap each halfword. */
39d5492a 343static void gen_rev16(TCGv_i32 var)
3670669c 344{
39d5492a 345 TCGv_i32 tmp = tcg_temp_new_i32();
68cedf73 346 TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
3670669c 347 tcg_gen_shri_i32(tmp, var, 8);
68cedf73
AJ
348 tcg_gen_and_i32(tmp, tmp, mask);
349 tcg_gen_and_i32(var, var, mask);
3670669c 350 tcg_gen_shli_i32(var, var, 8);
3670669c 351 tcg_gen_or_i32(var, var, tmp);
68cedf73 352 tcg_temp_free_i32(mask);
7d1b0095 353 tcg_temp_free_i32(tmp);
3670669c
PB
354}
355
356/* Byteswap low halfword and sign extend. */
39d5492a 357static void gen_revsh(TCGv_i32 var)
3670669c 358{
1a855029
AJ
359 tcg_gen_ext16u_i32(var, var);
360 tcg_gen_bswap16_i32(var, var);
361 tcg_gen_ext16s_i32(var, var);
3670669c
PB
362}
363
838fa72d 364/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 365static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 366{
838fa72d
AJ
367 TCGv_i64 tmp64 = tcg_temp_new_i64();
368
369 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 370 tcg_temp_free_i32(b);
838fa72d
AJ
371 tcg_gen_shli_i64(tmp64, tmp64, 32);
372 tcg_gen_add_i64(a, tmp64, a);
373
374 tcg_temp_free_i64(tmp64);
375 return a;
376}
377
378/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 379static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
380{
381 TCGv_i64 tmp64 = tcg_temp_new_i64();
382
383 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 384 tcg_temp_free_i32(b);
838fa72d
AJ
385 tcg_gen_shli_i64(tmp64, tmp64, 32);
386 tcg_gen_sub_i64(a, tmp64, a);
387
388 tcg_temp_free_i64(tmp64);
389 return a;
3670669c
PB
390}
391
5e3f878a 392/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 393static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 394{
39d5492a
PM
395 TCGv_i32 lo = tcg_temp_new_i32();
396 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 397 TCGv_i64 ret;
5e3f878a 398
831d7fe8 399 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 400 tcg_temp_free_i32(a);
7d1b0095 401 tcg_temp_free_i32(b);
831d7fe8
RH
402
403 ret = tcg_temp_new_i64();
404 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
405 tcg_temp_free_i32(lo);
406 tcg_temp_free_i32(hi);
831d7fe8
RH
407
408 return ret;
5e3f878a
PB
409}
410
39d5492a 411static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 412{
39d5492a
PM
413 TCGv_i32 lo = tcg_temp_new_i32();
414 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 415 TCGv_i64 ret;
5e3f878a 416
831d7fe8 417 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 418 tcg_temp_free_i32(a);
7d1b0095 419 tcg_temp_free_i32(b);
831d7fe8
RH
420
421 ret = tcg_temp_new_i64();
422 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
423 tcg_temp_free_i32(lo);
424 tcg_temp_free_i32(hi);
831d7fe8
RH
425
426 return ret;
5e3f878a
PB
427}
428
8f01245e 429/* Swap low and high halfwords. */
39d5492a 430static void gen_swap_half(TCGv_i32 var)
8f01245e 431{
39d5492a 432 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
433 tcg_gen_shri_i32(tmp, var, 16);
434 tcg_gen_shli_i32(var, var, 16);
435 tcg_gen_or_i32(var, var, tmp);
7d1b0095 436 tcg_temp_free_i32(tmp);
8f01245e
PB
437}
438
b26eefb6
PB
439/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
440 tmp = (t0 ^ t1) & 0x8000;
441 t0 &= ~0x8000;
442 t1 &= ~0x8000;
443 t0 = (t0 + t1) ^ tmp;
444 */
445
39d5492a 446static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 447{
39d5492a 448 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
449 tcg_gen_xor_i32(tmp, t0, t1);
450 tcg_gen_andi_i32(tmp, tmp, 0x8000);
451 tcg_gen_andi_i32(t0, t0, ~0x8000);
452 tcg_gen_andi_i32(t1, t1, ~0x8000);
453 tcg_gen_add_i32(t0, t0, t1);
454 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
455 tcg_temp_free_i32(tmp);
456 tcg_temp_free_i32(t1);
b26eefb6
PB
457}
458
459/* Set CF to the top bit of var. */
39d5492a 460static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 461{
66c374de 462 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
463}
464
465/* Set N and Z flags from var. */
39d5492a 466static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 467{
66c374de
AJ
468 tcg_gen_mov_i32(cpu_NF, var);
469 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
470}
471
472/* T0 += T1 + CF. */
39d5492a 473static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 474{
396e467c 475 tcg_gen_add_i32(t0, t0, t1);
66c374de 476 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
477}
478
e9bb4aa9 479/* dest = T0 + T1 + CF. */
39d5492a 480static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 481{
e9bb4aa9 482 tcg_gen_add_i32(dest, t0, t1);
66c374de 483 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
484}
485
3670669c 486/* dest = T0 - T1 + CF - 1. */
39d5492a 487static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 488{
3670669c 489 tcg_gen_sub_i32(dest, t0, t1);
66c374de 490 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 491 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
492}
493
72485ec4 494/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 495static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 496{
39d5492a 497 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
498 tcg_gen_movi_i32(tmp, 0);
499 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 500 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 501 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
502 tcg_gen_xor_i32(tmp, t0, t1);
503 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
504 tcg_temp_free_i32(tmp);
505 tcg_gen_mov_i32(dest, cpu_NF);
506}
507
49b4c31e 508/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 509static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 510{
39d5492a 511 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
512 if (TCG_TARGET_HAS_add2_i32) {
513 tcg_gen_movi_i32(tmp, 0);
514 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 515 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
516 } else {
517 TCGv_i64 q0 = tcg_temp_new_i64();
518 TCGv_i64 q1 = tcg_temp_new_i64();
519 tcg_gen_extu_i32_i64(q0, t0);
520 tcg_gen_extu_i32_i64(q1, t1);
521 tcg_gen_add_i64(q0, q0, q1);
522 tcg_gen_extu_i32_i64(q1, cpu_CF);
523 tcg_gen_add_i64(q0, q0, q1);
524 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
525 tcg_temp_free_i64(q0);
526 tcg_temp_free_i64(q1);
527 }
528 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
529 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
530 tcg_gen_xor_i32(tmp, t0, t1);
531 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
532 tcg_temp_free_i32(tmp);
533 tcg_gen_mov_i32(dest, cpu_NF);
534}
535
72485ec4 536/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 537static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 538{
39d5492a 539 TCGv_i32 tmp;
72485ec4
AJ
540 tcg_gen_sub_i32(cpu_NF, t0, t1);
541 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
542 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
543 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
544 tmp = tcg_temp_new_i32();
545 tcg_gen_xor_i32(tmp, t0, t1);
546 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
547 tcg_temp_free_i32(tmp);
548 tcg_gen_mov_i32(dest, cpu_NF);
549}
550
e77f0832 551/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 552static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 553{
39d5492a 554 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
555 tcg_gen_not_i32(tmp, t1);
556 gen_adc_CC(dest, t0, tmp);
39d5492a 557 tcg_temp_free_i32(tmp);
2de68a49
RH
558}
559
365af80e 560#define GEN_SHIFT(name) \
39d5492a 561static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 562{ \
39d5492a 563 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
564 tmp1 = tcg_temp_new_i32(); \
565 tcg_gen_andi_i32(tmp1, t1, 0xff); \
566 tmp2 = tcg_const_i32(0); \
567 tmp3 = tcg_const_i32(0x1f); \
568 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
569 tcg_temp_free_i32(tmp3); \
570 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
571 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
572 tcg_temp_free_i32(tmp2); \
573 tcg_temp_free_i32(tmp1); \
574}
575GEN_SHIFT(shl)
576GEN_SHIFT(shr)
577#undef GEN_SHIFT
578
39d5492a 579static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 580{
39d5492a 581 TCGv_i32 tmp1, tmp2;
365af80e
AJ
582 tmp1 = tcg_temp_new_i32();
583 tcg_gen_andi_i32(tmp1, t1, 0xff);
584 tmp2 = tcg_const_i32(0x1f);
585 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
586 tcg_temp_free_i32(tmp2);
587 tcg_gen_sar_i32(dest, t0, tmp1);
588 tcg_temp_free_i32(tmp1);
589}
590
39d5492a 591static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 592{
39d5492a
PM
593 TCGv_i32 c0 = tcg_const_i32(0);
594 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
595 tcg_gen_neg_i32(tmp, src);
596 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
597 tcg_temp_free_i32(c0);
598 tcg_temp_free_i32(tmp);
599}
ad69471c 600
39d5492a 601static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 602{
9a119ff6 603 if (shift == 0) {
66c374de 604 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 605 } else {
66c374de
AJ
606 tcg_gen_shri_i32(cpu_CF, var, shift);
607 if (shift != 31) {
608 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
609 }
9a119ff6 610 }
9a119ff6 611}
b26eefb6 612
9a119ff6 613/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
614static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
615 int shift, int flags)
9a119ff6
PB
616{
617 switch (shiftop) {
618 case 0: /* LSL */
619 if (shift != 0) {
620 if (flags)
621 shifter_out_im(var, 32 - shift);
622 tcg_gen_shli_i32(var, var, shift);
623 }
624 break;
625 case 1: /* LSR */
626 if (shift == 0) {
627 if (flags) {
66c374de 628 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
629 }
630 tcg_gen_movi_i32(var, 0);
631 } else {
632 if (flags)
633 shifter_out_im(var, shift - 1);
634 tcg_gen_shri_i32(var, var, shift);
635 }
636 break;
637 case 2: /* ASR */
638 if (shift == 0)
639 shift = 32;
640 if (flags)
641 shifter_out_im(var, shift - 1);
642 if (shift == 32)
643 shift = 31;
644 tcg_gen_sari_i32(var, var, shift);
645 break;
646 case 3: /* ROR/RRX */
647 if (shift != 0) {
648 if (flags)
649 shifter_out_im(var, shift - 1);
f669df27 650 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 651 } else {
39d5492a 652 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 653 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
654 if (flags)
655 shifter_out_im(var, 0);
656 tcg_gen_shri_i32(var, var, 1);
b26eefb6 657 tcg_gen_or_i32(var, var, tmp);
7d1b0095 658 tcg_temp_free_i32(tmp);
b26eefb6
PB
659 }
660 }
661};
662
39d5492a
PM
663static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
664 TCGv_i32 shift, int flags)
8984bd2e
PB
665{
666 if (flags) {
667 switch (shiftop) {
9ef39277
BS
668 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
669 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
670 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
671 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
672 }
673 } else {
674 switch (shiftop) {
365af80e
AJ
675 case 0:
676 gen_shl(var, var, shift);
677 break;
678 case 1:
679 gen_shr(var, var, shift);
680 break;
681 case 2:
682 gen_sar(var, var, shift);
683 break;
f669df27
AJ
684 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
685 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
686 }
687 }
7d1b0095 688 tcg_temp_free_i32(shift);
8984bd2e
PB
689}
690
6ddbc6e4
PB
691#define PAS_OP(pfx) \
692 switch (op2) { \
693 case 0: gen_pas_helper(glue(pfx,add16)); break; \
694 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
696 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 4: gen_pas_helper(glue(pfx,add8)); break; \
698 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
699 }
39d5492a 700static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 701{
a7812ae4 702 TCGv_ptr tmp;
6ddbc6e4
PB
703
704 switch (op1) {
705#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
706 case 1:
a7812ae4 707 tmp = tcg_temp_new_ptr();
0ecb72a5 708 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 709 PAS_OP(s)
b75263d6 710 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
711 break;
712 case 5:
a7812ae4 713 tmp = tcg_temp_new_ptr();
0ecb72a5 714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 715 PAS_OP(u)
b75263d6 716 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
717 break;
718#undef gen_pas_helper
719#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
720 case 2:
721 PAS_OP(q);
722 break;
723 case 3:
724 PAS_OP(sh);
725 break;
726 case 6:
727 PAS_OP(uq);
728 break;
729 case 7:
730 PAS_OP(uh);
731 break;
732#undef gen_pas_helper
733 }
734}
9ee6e8bb
PB
735#undef PAS_OP
736
6ddbc6e4
PB
737/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
738#define PAS_OP(pfx) \
ed89a2f1 739 switch (op1) { \
6ddbc6e4
PB
740 case 0: gen_pas_helper(glue(pfx,add8)); break; \
741 case 1: gen_pas_helper(glue(pfx,add16)); break; \
742 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
743 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
744 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
745 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
746 }
39d5492a 747static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 748{
a7812ae4 749 TCGv_ptr tmp;
6ddbc6e4 750
ed89a2f1 751 switch (op2) {
6ddbc6e4
PB
752#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
753 case 0:
a7812ae4 754 tmp = tcg_temp_new_ptr();
0ecb72a5 755 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 756 PAS_OP(s)
b75263d6 757 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
758 break;
759 case 4:
a7812ae4 760 tmp = tcg_temp_new_ptr();
0ecb72a5 761 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 762 PAS_OP(u)
b75263d6 763 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
764 break;
765#undef gen_pas_helper
766#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
767 case 1:
768 PAS_OP(q);
769 break;
770 case 2:
771 PAS_OP(sh);
772 break;
773 case 5:
774 PAS_OP(uq);
775 break;
776 case 6:
777 PAS_OP(uh);
778 break;
779#undef gen_pas_helper
780 }
781}
9ee6e8bb
PB
782#undef PAS_OP
783
39fb730a 784/*
6c2c63d3 785 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
786 * This is common between ARM and Aarch64 targets.
787 */
6c2c63d3 788void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 789{
6c2c63d3
RH
790 TCGv_i32 value;
791 TCGCond cond;
792 bool global = true;
d9ba4830 793
d9ba4830
PB
794 switch (cc) {
795 case 0: /* eq: Z */
d9ba4830 796 case 1: /* ne: !Z */
6c2c63d3
RH
797 cond = TCG_COND_EQ;
798 value = cpu_ZF;
d9ba4830 799 break;
6c2c63d3 800
d9ba4830 801 case 2: /* cs: C */
d9ba4830 802 case 3: /* cc: !C */
6c2c63d3
RH
803 cond = TCG_COND_NE;
804 value = cpu_CF;
d9ba4830 805 break;
6c2c63d3 806
d9ba4830 807 case 4: /* mi: N */
d9ba4830 808 case 5: /* pl: !N */
6c2c63d3
RH
809 cond = TCG_COND_LT;
810 value = cpu_NF;
d9ba4830 811 break;
6c2c63d3 812
d9ba4830 813 case 6: /* vs: V */
d9ba4830 814 case 7: /* vc: !V */
6c2c63d3
RH
815 cond = TCG_COND_LT;
816 value = cpu_VF;
d9ba4830 817 break;
6c2c63d3 818
d9ba4830 819 case 8: /* hi: C && !Z */
6c2c63d3
RH
820 case 9: /* ls: !C || Z -> !(C && !Z) */
821 cond = TCG_COND_NE;
822 value = tcg_temp_new_i32();
823 global = false;
824 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
825 ZF is non-zero for !Z; so AND the two subexpressions. */
826 tcg_gen_neg_i32(value, cpu_CF);
827 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 828 break;
6c2c63d3 829
d9ba4830 830 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 831 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
832 /* Since we're only interested in the sign bit, == 0 is >= 0. */
833 cond = TCG_COND_GE;
834 value = tcg_temp_new_i32();
835 global = false;
836 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 837 break;
6c2c63d3 838
d9ba4830 839 case 12: /* gt: !Z && N == V */
d9ba4830 840 case 13: /* le: Z || N != V */
6c2c63d3
RH
841 cond = TCG_COND_NE;
842 value = tcg_temp_new_i32();
843 global = false;
844 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
845 * the sign bit then AND with ZF to yield the result. */
846 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
847 tcg_gen_sari_i32(value, value, 31);
848 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 849 break;
6c2c63d3 850
9305eac0
RH
851 case 14: /* always */
852 case 15: /* always */
853 /* Use the ALWAYS condition, which will fold early.
854 * It doesn't matter what we use for the value. */
855 cond = TCG_COND_ALWAYS;
856 value = cpu_ZF;
857 goto no_invert;
858
d9ba4830
PB
859 default:
860 fprintf(stderr, "Bad condition code 0x%x\n", cc);
861 abort();
862 }
6c2c63d3
RH
863
864 if (cc & 1) {
865 cond = tcg_invert_cond(cond);
866 }
867
9305eac0 868 no_invert:
6c2c63d3
RH
869 cmp->cond = cond;
870 cmp->value = value;
871 cmp->value_global = global;
872}
873
874void arm_free_cc(DisasCompare *cmp)
875{
876 if (!cmp->value_global) {
877 tcg_temp_free_i32(cmp->value);
878 }
879}
880
881void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
882{
883 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
884}
885
886void arm_gen_test_cc(int cc, TCGLabel *label)
887{
888 DisasCompare cmp;
889 arm_test_cc(&cmp, cc);
890 arm_jump_cc(&cmp, label);
891 arm_free_cc(&cmp);
d9ba4830 892}
2c0262af 893
b1d8e52e 894static const uint8_t table_logic_cc[16] = {
2c0262af
FB
895 1, /* and */
896 1, /* xor */
897 0, /* sub */
898 0, /* rsb */
899 0, /* add */
900 0, /* adc */
901 0, /* sbc */
902 0, /* rsc */
903 1, /* andl */
904 1, /* xorl */
905 0, /* cmp */
906 0, /* cmn */
907 1, /* orr */
908 1, /* mov */
909 1, /* bic */
910 1, /* mvn */
911};
3b46e624 912
4d5e8c96
PM
913static inline void gen_set_condexec(DisasContext *s)
914{
915 if (s->condexec_mask) {
916 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
917 TCGv_i32 tmp = tcg_temp_new_i32();
918 tcg_gen_movi_i32(tmp, val);
919 store_cpu_field(tmp, condexec_bits);
920 }
921}
922
923static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
924{
925 tcg_gen_movi_i32(cpu_R[15], val);
926}
927
d9ba4830
PB
928/* Set PC and Thumb state from an immediate address. */
929static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 930{
39d5492a 931 TCGv_i32 tmp;
99c475ab 932
dcba3a8d 933 s->base.is_jmp = DISAS_JUMP;
d9ba4830 934 if (s->thumb != (addr & 1)) {
7d1b0095 935 tmp = tcg_temp_new_i32();
d9ba4830 936 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 937 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 938 tcg_temp_free_i32(tmp);
d9ba4830 939 }
155c3eac 940 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
941}
942
943/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 944static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 945{
dcba3a8d 946 s->base.is_jmp = DISAS_JUMP;
155c3eac
FN
947 tcg_gen_andi_i32(cpu_R[15], var, ~1);
948 tcg_gen_andi_i32(var, var, 1);
949 store_cpu_field(var, thumb);
d9ba4830
PB
950}
951
3bb8a96f
PM
952/* Set PC and Thumb state from var. var is marked as dead.
953 * For M-profile CPUs, include logic to detect exception-return
954 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
955 * and BX reg, and no others, and happens only for code in Handler mode.
956 */
957static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
958{
959 /* Generate the same code here as for a simple bx, but flag via
dcba3a8d 960 * s->base.is_jmp that we need to do the rest of the work later.
3bb8a96f
PM
961 */
962 gen_bx(s, var);
d02a8698
PM
963 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
964 (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
dcba3a8d 965 s->base.is_jmp = DISAS_BX_EXCRET;
3bb8a96f
PM
966 }
967}
968
969static inline void gen_bx_excret_final_code(DisasContext *s)
970{
971 /* Generate the code to finish possible exception return and end the TB */
972 TCGLabel *excret_label = gen_new_label();
d02a8698
PM
973 uint32_t min_magic;
974
975 if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
976 /* Covers FNC_RETURN and EXC_RETURN magic */
977 min_magic = FNC_RETURN_MIN_MAGIC;
978 } else {
979 /* EXC_RETURN magic only */
980 min_magic = EXC_RETURN_MIN_MAGIC;
981 }
3bb8a96f
PM
982
983 /* Is the new PC value in the magic range indicating exception return? */
d02a8698 984 tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
3bb8a96f
PM
985 /* No: end the TB as we would for a DISAS_JMP */
986 if (is_singlestepping(s)) {
987 gen_singlestep_exception(s);
988 } else {
989 tcg_gen_exit_tb(0);
990 }
991 gen_set_label(excret_label);
992 /* Yes: this is an exception return.
993 * At this point in runtime env->regs[15] and env->thumb will hold
994 * the exception-return magic number, which do_v7m_exception_exit()
995 * will read. Nothing else will be able to see those values because
996 * the cpu-exec main loop guarantees that we will always go straight
997 * from raising the exception to the exception-handling code.
998 *
999 * gen_ss_advance(s) does nothing on M profile currently but
1000 * calling it is conceptually the right thing as we have executed
1001 * this instruction (compare SWI, HVC, SMC handling).
1002 */
1003 gen_ss_advance(s);
1004 gen_exception_internal(EXCP_EXCEPTION_EXIT);
1005}
1006
fb602cb7
PM
1007static inline void gen_bxns(DisasContext *s, int rm)
1008{
1009 TCGv_i32 var = load_reg(s, rm);
1010
1011 /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
1012 * we need to sync state before calling it, but:
1013 * - we don't need to do gen_set_pc_im() because the bxns helper will
1014 * always set the PC itself
1015 * - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
1016 * unless it's outside an IT block or the last insn in an IT block,
1017 * so we know that condexec == 0 (already set at the top of the TB)
1018 * is correct in the non-UNPREDICTABLE cases, and we can choose
1019 * "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
1020 */
1021 gen_helper_v7m_bxns(cpu_env, var);
1022 tcg_temp_free_i32(var);
ef475b5d 1023 s->base.is_jmp = DISAS_EXIT;
fb602cb7
PM
1024}
1025
3e3fa230
PM
1026static inline void gen_blxns(DisasContext *s, int rm)
1027{
1028 TCGv_i32 var = load_reg(s, rm);
1029
1030 /* We don't need to sync condexec state, for the same reason as bxns.
1031 * We do however need to set the PC, because the blxns helper reads it.
1032 * The blxns helper may throw an exception.
1033 */
1034 gen_set_pc_im(s, s->pc);
1035 gen_helper_v7m_blxns(cpu_env, var);
1036 tcg_temp_free_i32(var);
1037 s->base.is_jmp = DISAS_EXIT;
1038}
1039
21aeb343
JR
1040/* Variant of store_reg which uses branch&exchange logic when storing
1041 to r15 in ARM architecture v7 and above. The source must be a temporary
1042 and will be marked as dead. */
7dcc1f89 1043static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1044{
1045 if (reg == 15 && ENABLE_ARCH_7) {
1046 gen_bx(s, var);
1047 } else {
1048 store_reg(s, reg, var);
1049 }
1050}
1051
be5e7a76
DES
1052/* Variant of store_reg which uses branch&exchange logic when storing
1053 * to r15 in ARM architecture v5T and above. This is used for storing
1054 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1055 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1056static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1057{
1058 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1059 gen_bx_excret(s, var);
be5e7a76
DES
1060 } else {
1061 store_reg(s, reg, var);
1062 }
1063}
1064
/* Compile-time flag: 1 when building the user-mode-only emulator. */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1070
08307563
PM
1071/* Abstractions of "generate code to do a guest load/store for
1072 * AArch32", where a vaddr is always 32 bits (and is zero
1073 * extended if we're a 64 bit core) and data is also
1074 * 32 bits unless specifically doing a 64 bit access.
1075 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 1076 * that the address argument is TCGv_i32 rather than TCGv.
08307563 1077 */
08307563 1078
7f5616f5 1079static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
08307563 1080{
7f5616f5
RH
1081 TCGv addr = tcg_temp_new();
1082 tcg_gen_extu_i32_tl(addr, a32);
1083
e334bd31 1084 /* Not needed for user-mode BE32, where we use MO_BE instead. */
7f5616f5
RH
1085 if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
1086 tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
e334bd31 1087 }
7f5616f5 1088 return addr;
08307563
PM
1089}
1090
7f5616f5
RH
1091static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1092 int index, TCGMemOp opc)
08307563 1093{
7f5616f5
RH
1094 TCGv addr = gen_aa32_addr(s, a32, opc);
1095 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1096 tcg_temp_free(addr);
08307563
PM
1097}
1098
7f5616f5
RH
1099static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1100 int index, TCGMemOp opc)
1101{
1102 TCGv addr = gen_aa32_addr(s, a32, opc);
1103 tcg_gen_qemu_st_i32(val, addr, index, opc);
1104 tcg_temp_free(addr);
1105}
08307563 1106
/* Generate the per-width load helpers (gen_aa32_ldSUFF) plus the _iss
 * variants which additionally record ISS syndrome information for
 * possible data-abort reporting.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* As DO_GEN_LD, but for the store direction (the _iss variant marks the
 * syndrome as a write).
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1136
7f5616f5 1137static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1138{
e334bd31
PB
1139 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1140 if (!IS_USER_ONLY && s->sctlr_b) {
1141 tcg_gen_rotri_i64(val, val, 32);
1142 }
08307563
PM
1143}
1144
7f5616f5
RH
1145static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1146 int index, TCGMemOp opc)
08307563 1147{
7f5616f5
RH
1148 TCGv addr = gen_aa32_addr(s, a32, opc);
1149 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1150 gen_aa32_frob64(s, val);
1151 tcg_temp_free(addr);
1152}
1153
1154static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1155 TCGv_i32 a32, int index)
1156{
1157 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1158}
1159
1160static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1161 int index, TCGMemOp opc)
1162{
1163 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1164
1165 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1166 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1167 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1168 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1169 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1170 tcg_temp_free_i64(tmp);
e334bd31 1171 } else {
7f5616f5 1172 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1173 }
7f5616f5 1174 tcg_temp_free(addr);
08307563
PM
1175}
1176
7f5616f5
RH
1177static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1178 TCGv_i32 a32, int index)
1179{
1180 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1181}
08307563 1182
7f5616f5
RH
1183DO_GEN_LD(8s, MO_SB)
1184DO_GEN_LD(8u, MO_UB)
1185DO_GEN_LD(16s, MO_SW)
1186DO_GEN_LD(16u, MO_UW)
1187DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1188DO_GEN_ST(8, MO_UB)
1189DO_GEN_ST(16, MO_UW)
1190DO_GEN_ST(32, MO_UL)
08307563 1191
37e6456e
PM
1192static inline void gen_hvc(DisasContext *s, int imm16)
1193{
1194 /* The pre HVC helper handles cases when HVC gets trapped
1195 * as an undefined insn by runtime configuration (ie before
1196 * the insn really executes).
1197 */
1198 gen_set_pc_im(s, s->pc - 4);
1199 gen_helper_pre_hvc(cpu_env);
1200 /* Otherwise we will treat this as a real exception which
1201 * happens after execution of the insn. (The distinction matters
1202 * for the PC value reported to the exception handler and also
1203 * for single stepping.)
1204 */
1205 s->svc_imm = imm16;
1206 gen_set_pc_im(s, s->pc);
dcba3a8d 1207 s->base.is_jmp = DISAS_HVC;
37e6456e
PM
1208}
1209
1210static inline void gen_smc(DisasContext *s)
1211{
1212 /* As with HVC, we may take an exception either before or after
1213 * the insn executes.
1214 */
1215 TCGv_i32 tmp;
1216
1217 gen_set_pc_im(s, s->pc - 4);
1218 tmp = tcg_const_i32(syn_aa32_smc());
1219 gen_helper_pre_smc(cpu_env, tmp);
1220 tcg_temp_free_i32(tmp);
1221 gen_set_pc_im(s, s->pc);
dcba3a8d 1222 s->base.is_jmp = DISAS_SMC;
37e6456e
PM
1223}
1224
d4a2dc67
PM
1225static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1226{
1227 gen_set_condexec(s);
1228 gen_set_pc_im(s, s->pc - offset);
1229 gen_exception_internal(excp);
dcba3a8d 1230 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1231}
1232
73710361
GB
1233static void gen_exception_insn(DisasContext *s, int offset, int excp,
1234 int syn, uint32_t target_el)
d4a2dc67
PM
1235{
1236 gen_set_condexec(s);
1237 gen_set_pc_im(s, s->pc - offset);
73710361 1238 gen_exception(excp, syn, target_el);
dcba3a8d 1239 s->base.is_jmp = DISAS_NORETURN;
d4a2dc67
PM
1240}
1241
b5ff1b31
FB
1242/* Force a TB lookup after an instruction that changes the CPU state. */
1243static inline void gen_lookup_tb(DisasContext *s)
1244{
a6445c52 1245 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
dcba3a8d 1246 s->base.is_jmp = DISAS_EXIT;
b5ff1b31
FB
1247}
1248
19a6e31c
PM
1249static inline void gen_hlt(DisasContext *s, int imm)
1250{
1251 /* HLT. This has two purposes.
1252 * Architecturally, it is an external halting debug instruction.
1253 * Since QEMU doesn't implement external debug, we treat this as
1254 * it is required for halting debug disabled: it will UNDEF.
1255 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1256 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1257 * must trigger semihosting even for ARMv7 and earlier, where
1258 * HLT was an undefined encoding.
1259 * In system mode, we don't allow userspace access to
1260 * semihosting, to provide some semblance of security
1261 * (and for consistency with our 32-bit semihosting).
1262 */
1263 if (semihosting_enabled() &&
1264#ifndef CONFIG_USER_ONLY
1265 s->current_el != 0 &&
1266#endif
1267 (imm == (s->thumb ? 0x3c : 0xf000))) {
1268 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1269 return;
1270 }
1271
1272 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1273 default_exception_el(s));
1274}
1275
b0109805 1276static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1277 TCGv_i32 var)
2c0262af 1278{
1e8d4eec 1279 int val, rm, shift, shiftop;
39d5492a 1280 TCGv_i32 offset;
2c0262af
FB
1281
1282 if (!(insn & (1 << 25))) {
1283 /* immediate */
1284 val = insn & 0xfff;
1285 if (!(insn & (1 << 23)))
1286 val = -val;
537730b9 1287 if (val != 0)
b0109805 1288 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1289 } else {
1290 /* shift/register */
1291 rm = (insn) & 0xf;
1292 shift = (insn >> 7) & 0x1f;
1e8d4eec 1293 shiftop = (insn >> 5) & 3;
b26eefb6 1294 offset = load_reg(s, rm);
9a119ff6 1295 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1296 if (!(insn & (1 << 23)))
b0109805 1297 tcg_gen_sub_i32(var, var, offset);
2c0262af 1298 else
b0109805 1299 tcg_gen_add_i32(var, var, offset);
7d1b0095 1300 tcg_temp_free_i32(offset);
2c0262af
FB
1301 }
1302}
1303
191f9a93 1304static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1305 int extra, TCGv_i32 var)
2c0262af
FB
1306{
1307 int val, rm;
39d5492a 1308 TCGv_i32 offset;
3b46e624 1309
2c0262af
FB
1310 if (insn & (1 << 22)) {
1311 /* immediate */
1312 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1313 if (!(insn & (1 << 23)))
1314 val = -val;
18acad92 1315 val += extra;
537730b9 1316 if (val != 0)
b0109805 1317 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1318 } else {
1319 /* register */
191f9a93 1320 if (extra)
b0109805 1321 tcg_gen_addi_i32(var, var, extra);
2c0262af 1322 rm = (insn) & 0xf;
b26eefb6 1323 offset = load_reg(s, rm);
2c0262af 1324 if (!(insn & (1 << 23)))
b0109805 1325 tcg_gen_sub_i32(var, var, offset);
2c0262af 1326 else
b0109805 1327 tcg_gen_add_i32(var, var, offset);
7d1b0095 1328 tcg_temp_free_i32(offset);
2c0262af
FB
1329 }
1330}
1331
5aaebd13
PM
1332static TCGv_ptr get_fpstatus_ptr(int neon)
1333{
1334 TCGv_ptr statusptr = tcg_temp_new_ptr();
1335 int offset;
1336 if (neon) {
0ecb72a5 1337 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1338 } else {
0ecb72a5 1339 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1340 }
1341 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1342 return statusptr;
1343}
1344
4373f3ce
PB
1345#define VFP_OP2(name) \
1346static inline void gen_vfp_##name(int dp) \
1347{ \
ae1857ec
PM
1348 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1349 if (dp) { \
1350 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1351 } else { \
1352 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1353 } \
1354 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1355}
1356
4373f3ce
PB
1357VFP_OP2(add)
1358VFP_OP2(sub)
1359VFP_OP2(mul)
1360VFP_OP2(div)
1361
1362#undef VFP_OP2
1363
605a6aed
PM
1364static inline void gen_vfp_F1_mul(int dp)
1365{
1366 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1367 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1368 if (dp) {
ae1857ec 1369 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1370 } else {
ae1857ec 1371 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1372 }
ae1857ec 1373 tcg_temp_free_ptr(fpst);
605a6aed
PM
1374}
1375
1376static inline void gen_vfp_F1_neg(int dp)
1377{
1378 /* Like gen_vfp_neg() but put result in F1 */
1379 if (dp) {
1380 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1381 } else {
1382 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1383 }
1384}
1385
4373f3ce
PB
1386static inline void gen_vfp_abs(int dp)
1387{
1388 if (dp)
1389 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1390 else
1391 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1392}
1393
1394static inline void gen_vfp_neg(int dp)
1395{
1396 if (dp)
1397 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1398 else
1399 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1400}
1401
1402static inline void gen_vfp_sqrt(int dp)
1403{
1404 if (dp)
1405 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1406 else
1407 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1408}
1409
1410static inline void gen_vfp_cmp(int dp)
1411{
1412 if (dp)
1413 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1414 else
1415 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1416}
1417
1418static inline void gen_vfp_cmpe(int dp)
1419{
1420 if (dp)
1421 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1422 else
1423 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1424}
1425
1426static inline void gen_vfp_F1_ld0(int dp)
1427{
1428 if (dp)
5b340b51 1429 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1430 else
5b340b51 1431 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1432}
1433
5500b06c
PM
1434#define VFP_GEN_ITOF(name) \
1435static inline void gen_vfp_##name(int dp, int neon) \
1436{ \
5aaebd13 1437 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1438 if (dp) { \
1439 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1440 } else { \
1441 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1442 } \
b7fa9214 1443 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1444}
1445
5500b06c
PM
1446VFP_GEN_ITOF(uito)
1447VFP_GEN_ITOF(sito)
1448#undef VFP_GEN_ITOF
4373f3ce 1449
5500b06c
PM
1450#define VFP_GEN_FTOI(name) \
1451static inline void gen_vfp_##name(int dp, int neon) \
1452{ \
5aaebd13 1453 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1454 if (dp) { \
1455 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1456 } else { \
1457 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1458 } \
b7fa9214 1459 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1460}
1461
5500b06c
PM
1462VFP_GEN_FTOI(toui)
1463VFP_GEN_FTOI(touiz)
1464VFP_GEN_FTOI(tosi)
1465VFP_GEN_FTOI(tosiz)
1466#undef VFP_GEN_FTOI
4373f3ce 1467
16d5b3ca 1468#define VFP_GEN_FIX(name, round) \
5500b06c 1469static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1470{ \
39d5492a 1471 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1472 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1473 if (dp) { \
16d5b3ca
WN
1474 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1475 statusptr); \
5500b06c 1476 } else { \
16d5b3ca
WN
1477 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1478 statusptr); \
5500b06c 1479 } \
b75263d6 1480 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1481 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1482}
16d5b3ca
WN
1483VFP_GEN_FIX(tosh, _round_to_zero)
1484VFP_GEN_FIX(tosl, _round_to_zero)
1485VFP_GEN_FIX(touh, _round_to_zero)
1486VFP_GEN_FIX(toul, _round_to_zero)
1487VFP_GEN_FIX(shto, )
1488VFP_GEN_FIX(slto, )
1489VFP_GEN_FIX(uhto, )
1490VFP_GEN_FIX(ulto, )
4373f3ce 1491#undef VFP_GEN_FIX
9ee6e8bb 1492
39d5492a 1493static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1494{
08307563 1495 if (dp) {
12dcc321 1496 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1497 } else {
12dcc321 1498 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1499 }
b5ff1b31
FB
1500}
1501
39d5492a 1502static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1503{
08307563 1504 if (dp) {
12dcc321 1505 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1506 } else {
12dcc321 1507 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1508 }
b5ff1b31
FB
1509}
1510
8e96005d
FB
1511static inline long
1512vfp_reg_offset (int dp, int reg)
1513{
1514 if (dp)
1515 return offsetof(CPUARMState, vfp.regs[reg]);
1516 else if (reg & 1) {
1517 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1518 + offsetof(CPU_DoubleU, l.upper);
1519 } else {
1520 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1521 + offsetof(CPU_DoubleU, l.lower);
1522 }
1523}
9ee6e8bb
PB
1524
1525/* Return the offset of a 32-bit piece of a NEON register.
1526 zero is the least significant end of the register. */
1527static inline long
1528neon_reg_offset (int reg, int n)
1529{
1530 int sreg;
1531 sreg = reg * 2 + n;
1532 return vfp_reg_offset(0, sreg);
1533}
1534
39d5492a 1535static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1536{
39d5492a 1537 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1538 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1539 return tmp;
1540}
1541
39d5492a 1542static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1543{
1544 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1545 tcg_temp_free_i32(var);
8f8e3aa4
PB
1546}
1547
a7812ae4 1548static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1549{
1550 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1551}
1552
a7812ae4 1553static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1554{
1555 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1556}
1557
4373f3ce
PB
1558#define tcg_gen_ld_f32 tcg_gen_ld_i32
1559#define tcg_gen_ld_f64 tcg_gen_ld_i64
1560#define tcg_gen_st_f32 tcg_gen_st_i32
1561#define tcg_gen_st_f64 tcg_gen_st_i64
1562
b7bcbe95
FB
1563static inline void gen_mov_F0_vreg(int dp, int reg)
1564{
1565 if (dp)
4373f3ce 1566 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1567 else
4373f3ce 1568 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1569}
1570
1571static inline void gen_mov_F1_vreg(int dp, int reg)
1572{
1573 if (dp)
4373f3ce 1574 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1575 else
4373f3ce 1576 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1577}
1578
1579static inline void gen_mov_vreg_F0(int dp, int reg)
1580{
1581 if (dp)
4373f3ce 1582 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1583 else
4373f3ce 1584 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1585}
1586
18c9b560
AZ
1587#define ARM_CP_RW_BIT (1 << 20)
1588
a7812ae4 1589static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1590{
0ecb72a5 1591 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1592}
1593
a7812ae4 1594static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1595{
0ecb72a5 1596 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1597}
1598
39d5492a 1599static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1600{
39d5492a 1601 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1602 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1603 return var;
e677137d
PB
1604}
1605
39d5492a 1606static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1607{
0ecb72a5 1608 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1609 tcg_temp_free_i32(var);
e677137d
PB
1610}
1611
1612static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1613{
1614 iwmmxt_store_reg(cpu_M0, rn);
1615}
1616
1617static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1618{
1619 iwmmxt_load_reg(cpu_M0, rn);
1620}
1621
1622static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1623{
1624 iwmmxt_load_reg(cpu_V1, rn);
1625 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1626}
1627
1628static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1629{
1630 iwmmxt_load_reg(cpu_V1, rn);
1631 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1632}
1633
1634static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1635{
1636 iwmmxt_load_reg(cpu_V1, rn);
1637 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1638}
1639
1640#define IWMMXT_OP(name) \
1641static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1642{ \
1643 iwmmxt_load_reg(cpu_V1, rn); \
1644 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1645}
1646
477955bd
PM
1647#define IWMMXT_OP_ENV(name) \
1648static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1649{ \
1650 iwmmxt_load_reg(cpu_V1, rn); \
1651 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1652}
1653
1654#define IWMMXT_OP_ENV_SIZE(name) \
1655IWMMXT_OP_ENV(name##b) \
1656IWMMXT_OP_ENV(name##w) \
1657IWMMXT_OP_ENV(name##l)
e677137d 1658
477955bd 1659#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1660static inline void gen_op_iwmmxt_##name##_M0(void) \
1661{ \
477955bd 1662 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1663}
1664
1665IWMMXT_OP(maddsq)
1666IWMMXT_OP(madduq)
1667IWMMXT_OP(sadb)
1668IWMMXT_OP(sadw)
1669IWMMXT_OP(mulslw)
1670IWMMXT_OP(mulshw)
1671IWMMXT_OP(mululw)
1672IWMMXT_OP(muluhw)
1673IWMMXT_OP(macsw)
1674IWMMXT_OP(macuw)
1675
477955bd
PM
1676IWMMXT_OP_ENV_SIZE(unpackl)
1677IWMMXT_OP_ENV_SIZE(unpackh)
1678
1679IWMMXT_OP_ENV1(unpacklub)
1680IWMMXT_OP_ENV1(unpackluw)
1681IWMMXT_OP_ENV1(unpacklul)
1682IWMMXT_OP_ENV1(unpackhub)
1683IWMMXT_OP_ENV1(unpackhuw)
1684IWMMXT_OP_ENV1(unpackhul)
1685IWMMXT_OP_ENV1(unpacklsb)
1686IWMMXT_OP_ENV1(unpacklsw)
1687IWMMXT_OP_ENV1(unpacklsl)
1688IWMMXT_OP_ENV1(unpackhsb)
1689IWMMXT_OP_ENV1(unpackhsw)
1690IWMMXT_OP_ENV1(unpackhsl)
1691
1692IWMMXT_OP_ENV_SIZE(cmpeq)
1693IWMMXT_OP_ENV_SIZE(cmpgtu)
1694IWMMXT_OP_ENV_SIZE(cmpgts)
1695
1696IWMMXT_OP_ENV_SIZE(mins)
1697IWMMXT_OP_ENV_SIZE(minu)
1698IWMMXT_OP_ENV_SIZE(maxs)
1699IWMMXT_OP_ENV_SIZE(maxu)
1700
1701IWMMXT_OP_ENV_SIZE(subn)
1702IWMMXT_OP_ENV_SIZE(addn)
1703IWMMXT_OP_ENV_SIZE(subu)
1704IWMMXT_OP_ENV_SIZE(addu)
1705IWMMXT_OP_ENV_SIZE(subs)
1706IWMMXT_OP_ENV_SIZE(adds)
1707
1708IWMMXT_OP_ENV(avgb0)
1709IWMMXT_OP_ENV(avgb1)
1710IWMMXT_OP_ENV(avgw0)
1711IWMMXT_OP_ENV(avgw1)
e677137d 1712
477955bd
PM
1713IWMMXT_OP_ENV(packuw)
1714IWMMXT_OP_ENV(packul)
1715IWMMXT_OP_ENV(packuq)
1716IWMMXT_OP_ENV(packsw)
1717IWMMXT_OP_ENV(packsl)
1718IWMMXT_OP_ENV(packsq)
e677137d 1719
e677137d
PB
1720static void gen_op_iwmmxt_set_mup(void)
1721{
39d5492a 1722 TCGv_i32 tmp;
e677137d
PB
1723 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1724 tcg_gen_ori_i32(tmp, tmp, 2);
1725 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1726}
1727
1728static void gen_op_iwmmxt_set_cup(void)
1729{
39d5492a 1730 TCGv_i32 tmp;
e677137d
PB
1731 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1732 tcg_gen_ori_i32(tmp, tmp, 1);
1733 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1734}
1735
1736static void gen_op_iwmmxt_setpsr_nz(void)
1737{
39d5492a 1738 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1739 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1740 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1741}
1742
1743static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1744{
1745 iwmmxt_load_reg(cpu_V1, rn);
86831435 1746 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1747 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1748}
1749
39d5492a
PM
1750static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1751 TCGv_i32 dest)
18c9b560
AZ
1752{
1753 int rd;
1754 uint32_t offset;
39d5492a 1755 TCGv_i32 tmp;
18c9b560
AZ
1756
1757 rd = (insn >> 16) & 0xf;
da6b5335 1758 tmp = load_reg(s, rd);
18c9b560
AZ
1759
1760 offset = (insn & 0xff) << ((insn >> 7) & 2);
1761 if (insn & (1 << 24)) {
1762 /* Pre indexed */
1763 if (insn & (1 << 23))
da6b5335 1764 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1765 else
da6b5335
FN
1766 tcg_gen_addi_i32(tmp, tmp, -offset);
1767 tcg_gen_mov_i32(dest, tmp);
18c9b560 1768 if (insn & (1 << 21))
da6b5335
FN
1769 store_reg(s, rd, tmp);
1770 else
7d1b0095 1771 tcg_temp_free_i32(tmp);
18c9b560
AZ
1772 } else if (insn & (1 << 21)) {
1773 /* Post indexed */
da6b5335 1774 tcg_gen_mov_i32(dest, tmp);
18c9b560 1775 if (insn & (1 << 23))
da6b5335 1776 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1777 else
da6b5335
FN
1778 tcg_gen_addi_i32(tmp, tmp, -offset);
1779 store_reg(s, rd, tmp);
18c9b560
AZ
1780 } else if (!(insn & (1 << 23)))
1781 return 1;
1782 return 0;
1783}
1784
39d5492a 1785static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1786{
1787 int rd = (insn >> 0) & 0xf;
39d5492a 1788 TCGv_i32 tmp;
18c9b560 1789
da6b5335
FN
1790 if (insn & (1 << 8)) {
1791 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1792 return 1;
da6b5335
FN
1793 } else {
1794 tmp = iwmmxt_load_creg(rd);
1795 }
1796 } else {
7d1b0095 1797 tmp = tcg_temp_new_i32();
da6b5335 1798 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1799 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1800 }
1801 tcg_gen_andi_i32(tmp, tmp, mask);
1802 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1803 tcg_temp_free_i32(tmp);
18c9b560
AZ
1804 return 0;
1805}
1806
a1c7273b 1807/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1808 (ie. an undefined instruction). */
7dcc1f89 1809static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1810{
1811 int rd, wrd;
1812 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1813 TCGv_i32 addr;
1814 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1815
1816 if ((insn & 0x0e000e00) == 0x0c000000) {
1817 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1818 wrd = insn & 0xf;
1819 rdlo = (insn >> 12) & 0xf;
1820 rdhi = (insn >> 16) & 0xf;
1821 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1822 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1823 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1824 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1825 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1826 } else { /* TMCRR */
da6b5335
FN
1827 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1828 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1829 gen_op_iwmmxt_set_mup();
1830 }
1831 return 0;
1832 }
1833
1834 wrd = (insn >> 12) & 0xf;
7d1b0095 1835 addr = tcg_temp_new_i32();
da6b5335 1836 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1837 tcg_temp_free_i32(addr);
18c9b560 1838 return 1;
da6b5335 1839 }
18c9b560
AZ
1840 if (insn & ARM_CP_RW_BIT) {
1841 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1842 tmp = tcg_temp_new_i32();
12dcc321 1843 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1844 iwmmxt_store_creg(wrd, tmp);
18c9b560 1845 } else {
e677137d
PB
1846 i = 1;
1847 if (insn & (1 << 8)) {
1848 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1849 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1850 i = 0;
1851 } else { /* WLDRW wRd */
29531141 1852 tmp = tcg_temp_new_i32();
12dcc321 1853 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1854 }
1855 } else {
29531141 1856 tmp = tcg_temp_new_i32();
e677137d 1857 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1858 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1859 } else { /* WLDRB */
12dcc321 1860 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1861 }
1862 }
1863 if (i) {
1864 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1865 tcg_temp_free_i32(tmp);
e677137d 1866 }
18c9b560
AZ
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 }
1869 } else {
1870 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1871 tmp = iwmmxt_load_creg(wrd);
12dcc321 1872 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1873 } else {
1874 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1875 tmp = tcg_temp_new_i32();
e677137d
PB
1876 if (insn & (1 << 8)) {
1877 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1878 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1879 } else { /* WSTRW wRd */
ecc7b3aa 1880 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1881 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1882 }
1883 } else {
1884 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1885 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1886 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1887 } else { /* WSTRB */
ecc7b3aa 1888 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1889 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1890 }
1891 }
18c9b560 1892 }
29531141 1893 tcg_temp_free_i32(tmp);
18c9b560 1894 }
7d1b0095 1895 tcg_temp_free_i32(addr);
18c9b560
AZ
1896 return 0;
1897 }
1898
1899 if ((insn & 0x0f000000) != 0x0e000000)
1900 return 1;
1901
1902 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1903 case 0x000: /* WOR */
1904 wrd = (insn >> 12) & 0xf;
1905 rd0 = (insn >> 0) & 0xf;
1906 rd1 = (insn >> 16) & 0xf;
1907 gen_op_iwmmxt_movq_M0_wRn(rd0);
1908 gen_op_iwmmxt_orq_M0_wRn(rd1);
1909 gen_op_iwmmxt_setpsr_nz();
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 gen_op_iwmmxt_set_cup();
1913 break;
1914 case 0x011: /* TMCR */
1915 if (insn & 0xf)
1916 return 1;
1917 rd = (insn >> 12) & 0xf;
1918 wrd = (insn >> 16) & 0xf;
1919 switch (wrd) {
1920 case ARM_IWMMXT_wCID:
1921 case ARM_IWMMXT_wCASF:
1922 break;
1923 case ARM_IWMMXT_wCon:
1924 gen_op_iwmmxt_set_cup();
1925 /* Fall through. */
1926 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1927 tmp = iwmmxt_load_creg(wrd);
1928 tmp2 = load_reg(s, rd);
f669df27 1929 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1930 tcg_temp_free_i32(tmp2);
da6b5335 1931 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1932 break;
1933 case ARM_IWMMXT_wCGR0:
1934 case ARM_IWMMXT_wCGR1:
1935 case ARM_IWMMXT_wCGR2:
1936 case ARM_IWMMXT_wCGR3:
1937 gen_op_iwmmxt_set_cup();
da6b5335
FN
1938 tmp = load_reg(s, rd);
1939 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1940 break;
1941 default:
1942 return 1;
1943 }
1944 break;
1945 case 0x100: /* WXOR */
1946 wrd = (insn >> 12) & 0xf;
1947 rd0 = (insn >> 0) & 0xf;
1948 rd1 = (insn >> 16) & 0xf;
1949 gen_op_iwmmxt_movq_M0_wRn(rd0);
1950 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1951 gen_op_iwmmxt_setpsr_nz();
1952 gen_op_iwmmxt_movq_wRn_M0(wrd);
1953 gen_op_iwmmxt_set_mup();
1954 gen_op_iwmmxt_set_cup();
1955 break;
1956 case 0x111: /* TMRC */
1957 if (insn & 0xf)
1958 return 1;
1959 rd = (insn >> 12) & 0xf;
1960 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1961 tmp = iwmmxt_load_creg(wrd);
1962 store_reg(s, rd, tmp);
18c9b560
AZ
1963 break;
1964 case 0x300: /* WANDN */
1965 wrd = (insn >> 12) & 0xf;
1966 rd0 = (insn >> 0) & 0xf;
1967 rd1 = (insn >> 16) & 0xf;
1968 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1969 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1970 gen_op_iwmmxt_andq_M0_wRn(rd1);
1971 gen_op_iwmmxt_setpsr_nz();
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 gen_op_iwmmxt_set_cup();
1975 break;
1976 case 0x200: /* WAND */
1977 wrd = (insn >> 12) & 0xf;
1978 rd0 = (insn >> 0) & 0xf;
1979 rd1 = (insn >> 16) & 0xf;
1980 gen_op_iwmmxt_movq_M0_wRn(rd0);
1981 gen_op_iwmmxt_andq_M0_wRn(rd1);
1982 gen_op_iwmmxt_setpsr_nz();
1983 gen_op_iwmmxt_movq_wRn_M0(wrd);
1984 gen_op_iwmmxt_set_mup();
1985 gen_op_iwmmxt_set_cup();
1986 break;
1987 case 0x810: case 0xa10: /* WMADD */
1988 wrd = (insn >> 12) & 0xf;
1989 rd0 = (insn >> 0) & 0xf;
1990 rd1 = (insn >> 16) & 0xf;
1991 gen_op_iwmmxt_movq_M0_wRn(rd0);
1992 if (insn & (1 << 21))
1993 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1994 else
1995 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 break;
1999 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2000 wrd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
2002 rd1 = (insn >> 0) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 switch ((insn >> 22) & 3) {
2005 case 0:
2006 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2007 break;
2008 case 1:
2009 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2010 break;
2011 case 2:
2012 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2013 break;
2014 case 3:
2015 return 1;
2016 }
2017 gen_op_iwmmxt_movq_wRn_M0(wrd);
2018 gen_op_iwmmxt_set_mup();
2019 gen_op_iwmmxt_set_cup();
2020 break;
2021 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2022 wrd = (insn >> 12) & 0xf;
2023 rd0 = (insn >> 16) & 0xf;
2024 rd1 = (insn >> 0) & 0xf;
2025 gen_op_iwmmxt_movq_M0_wRn(rd0);
2026 switch ((insn >> 22) & 3) {
2027 case 0:
2028 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2029 break;
2030 case 1:
2031 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2032 break;
2033 case 2:
2034 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2035 break;
2036 case 3:
2037 return 1;
2038 }
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 gen_op_iwmmxt_set_cup();
2042 break;
2043 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2044 wrd = (insn >> 12) & 0xf;
2045 rd0 = (insn >> 16) & 0xf;
2046 rd1 = (insn >> 0) & 0xf;
2047 gen_op_iwmmxt_movq_M0_wRn(rd0);
2048 if (insn & (1 << 22))
2049 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2050 else
2051 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2052 if (!(insn & (1 << 20)))
2053 gen_op_iwmmxt_addl_M0_wRn(wrd);
2054 gen_op_iwmmxt_movq_wRn_M0(wrd);
2055 gen_op_iwmmxt_set_mup();
2056 break;
2057 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2058 wrd = (insn >> 12) & 0xf;
2059 rd0 = (insn >> 16) & 0xf;
2060 rd1 = (insn >> 0) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2062 if (insn & (1 << 21)) {
2063 if (insn & (1 << 20))
2064 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2065 else
2066 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2067 } else {
2068 if (insn & (1 << 20))
2069 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2070 else
2071 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2072 }
18c9b560
AZ
2073 gen_op_iwmmxt_movq_wRn_M0(wrd);
2074 gen_op_iwmmxt_set_mup();
2075 break;
2076 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2077 wrd = (insn >> 12) & 0xf;
2078 rd0 = (insn >> 16) & 0xf;
2079 rd1 = (insn >> 0) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0);
2081 if (insn & (1 << 21))
2082 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2083 else
2084 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2085 if (!(insn & (1 << 20))) {
e677137d
PB
2086 iwmmxt_load_reg(cpu_V1, wrd);
2087 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2088 }
2089 gen_op_iwmmxt_movq_wRn_M0(wrd);
2090 gen_op_iwmmxt_set_mup();
2091 break;
2092 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 rd1 = (insn >> 0) & 0xf;
2096 gen_op_iwmmxt_movq_M0_wRn(rd0);
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2100 break;
2101 case 1:
2102 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2103 break;
2104 case 2:
2105 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2106 break;
2107 case 3:
2108 return 1;
2109 }
2110 gen_op_iwmmxt_movq_wRn_M0(wrd);
2111 gen_op_iwmmxt_set_mup();
2112 gen_op_iwmmxt_set_cup();
2113 break;
2114 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2115 wrd = (insn >> 12) & 0xf;
2116 rd0 = (insn >> 16) & 0xf;
2117 rd1 = (insn >> 0) & 0xf;
2118 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2119 if (insn & (1 << 22)) {
2120 if (insn & (1 << 20))
2121 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2122 else
2123 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2124 } else {
2125 if (insn & (1 << 20))
2126 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2129 }
18c9b560
AZ
2130 gen_op_iwmmxt_movq_wRn_M0(wrd);
2131 gen_op_iwmmxt_set_mup();
2132 gen_op_iwmmxt_set_cup();
2133 break;
2134 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2135 wrd = (insn >> 12) & 0xf;
2136 rd0 = (insn >> 16) & 0xf;
2137 rd1 = (insn >> 0) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2139 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2140 tcg_gen_andi_i32(tmp, tmp, 7);
2141 iwmmxt_load_reg(cpu_V1, rd1);
2142 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2143 tcg_temp_free_i32(tmp);
18c9b560
AZ
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 break;
2147 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2148 if (((insn >> 6) & 3) == 3)
2149 return 1;
18c9b560
AZ
2150 rd = (insn >> 12) & 0xf;
2151 wrd = (insn >> 16) & 0xf;
da6b5335 2152 tmp = load_reg(s, rd);
18c9b560
AZ
2153 gen_op_iwmmxt_movq_M0_wRn(wrd);
2154 switch ((insn >> 6) & 3) {
2155 case 0:
da6b5335
FN
2156 tmp2 = tcg_const_i32(0xff);
2157 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2158 break;
2159 case 1:
da6b5335
FN
2160 tmp2 = tcg_const_i32(0xffff);
2161 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2162 break;
2163 case 2:
da6b5335
FN
2164 tmp2 = tcg_const_i32(0xffffffff);
2165 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2166 break;
da6b5335 2167 default:
39d5492a
PM
2168 TCGV_UNUSED_I32(tmp2);
2169 TCGV_UNUSED_I32(tmp3);
18c9b560 2170 }
da6b5335 2171 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2172 tcg_temp_free_i32(tmp3);
2173 tcg_temp_free_i32(tmp2);
7d1b0095 2174 tcg_temp_free_i32(tmp);
18c9b560
AZ
2175 gen_op_iwmmxt_movq_wRn_M0(wrd);
2176 gen_op_iwmmxt_set_mup();
2177 break;
2178 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2179 rd = (insn >> 12) & 0xf;
2180 wrd = (insn >> 16) & 0xf;
da6b5335 2181 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2182 return 1;
2183 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2184 tmp = tcg_temp_new_i32();
18c9b560
AZ
2185 switch ((insn >> 22) & 3) {
2186 case 0:
da6b5335 2187 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2188 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2189 if (insn & 8) {
2190 tcg_gen_ext8s_i32(tmp, tmp);
2191 } else {
2192 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2193 }
2194 break;
2195 case 1:
da6b5335 2196 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2197 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2198 if (insn & 8) {
2199 tcg_gen_ext16s_i32(tmp, tmp);
2200 } else {
2201 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2202 }
2203 break;
2204 case 2:
da6b5335 2205 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2206 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2207 break;
18c9b560 2208 }
da6b5335 2209 store_reg(s, rd, tmp);
18c9b560
AZ
2210 break;
2211 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2212 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2213 return 1;
da6b5335 2214 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2215 switch ((insn >> 22) & 3) {
2216 case 0:
da6b5335 2217 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2218 break;
2219 case 1:
da6b5335 2220 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2221 break;
2222 case 2:
da6b5335 2223 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2224 break;
18c9b560 2225 }
da6b5335
FN
2226 tcg_gen_shli_i32(tmp, tmp, 28);
2227 gen_set_nzcv(tmp);
7d1b0095 2228 tcg_temp_free_i32(tmp);
18c9b560
AZ
2229 break;
2230 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2231 if (((insn >> 6) & 3) == 3)
2232 return 1;
18c9b560
AZ
2233 rd = (insn >> 12) & 0xf;
2234 wrd = (insn >> 16) & 0xf;
da6b5335 2235 tmp = load_reg(s, rd);
18c9b560
AZ
2236 switch ((insn >> 6) & 3) {
2237 case 0:
da6b5335 2238 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2239 break;
2240 case 1:
da6b5335 2241 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2242 break;
2243 case 2:
da6b5335 2244 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2245 break;
18c9b560 2246 }
7d1b0095 2247 tcg_temp_free_i32(tmp);
18c9b560
AZ
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 break;
2251 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2252 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2253 return 1;
da6b5335 2254 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2255 tmp2 = tcg_temp_new_i32();
da6b5335 2256 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2257 switch ((insn >> 22) & 3) {
2258 case 0:
2259 for (i = 0; i < 7; i ++) {
da6b5335
FN
2260 tcg_gen_shli_i32(tmp2, tmp2, 4);
2261 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2262 }
2263 break;
2264 case 1:
2265 for (i = 0; i < 3; i ++) {
da6b5335
FN
2266 tcg_gen_shli_i32(tmp2, tmp2, 8);
2267 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2268 }
2269 break;
2270 case 2:
da6b5335
FN
2271 tcg_gen_shli_i32(tmp2, tmp2, 16);
2272 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2273 break;
18c9b560 2274 }
da6b5335 2275 gen_set_nzcv(tmp);
7d1b0095
PM
2276 tcg_temp_free_i32(tmp2);
2277 tcg_temp_free_i32(tmp);
18c9b560
AZ
2278 break;
2279 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2280 wrd = (insn >> 12) & 0xf;
2281 rd0 = (insn >> 16) & 0xf;
2282 gen_op_iwmmxt_movq_M0_wRn(rd0);
2283 switch ((insn >> 22) & 3) {
2284 case 0:
e677137d 2285 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2286 break;
2287 case 1:
e677137d 2288 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2289 break;
2290 case 2:
e677137d 2291 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2292 break;
2293 case 3:
2294 return 1;
2295 }
2296 gen_op_iwmmxt_movq_wRn_M0(wrd);
2297 gen_op_iwmmxt_set_mup();
2298 break;
2299 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2300 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2301 return 1;
da6b5335 2302 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2303 tmp2 = tcg_temp_new_i32();
da6b5335 2304 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2305 switch ((insn >> 22) & 3) {
2306 case 0:
2307 for (i = 0; i < 7; i ++) {
da6b5335
FN
2308 tcg_gen_shli_i32(tmp2, tmp2, 4);
2309 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2310 }
2311 break;
2312 case 1:
2313 for (i = 0; i < 3; i ++) {
da6b5335
FN
2314 tcg_gen_shli_i32(tmp2, tmp2, 8);
2315 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2316 }
2317 break;
2318 case 2:
da6b5335
FN
2319 tcg_gen_shli_i32(tmp2, tmp2, 16);
2320 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2321 break;
18c9b560 2322 }
da6b5335 2323 gen_set_nzcv(tmp);
7d1b0095
PM
2324 tcg_temp_free_i32(tmp2);
2325 tcg_temp_free_i32(tmp);
18c9b560
AZ
2326 break;
2327 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2328 rd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
da6b5335 2330 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2331 return 1;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2333 tmp = tcg_temp_new_i32();
18c9b560
AZ
2334 switch ((insn >> 22) & 3) {
2335 case 0:
da6b5335 2336 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2337 break;
2338 case 1:
da6b5335 2339 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2340 break;
2341 case 2:
da6b5335 2342 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2343 break;
18c9b560 2344 }
da6b5335 2345 store_reg(s, rd, tmp);
18c9b560
AZ
2346 break;
2347 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2348 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 rd1 = (insn >> 0) & 0xf;
2352 gen_op_iwmmxt_movq_M0_wRn(rd0);
2353 switch ((insn >> 22) & 3) {
2354 case 0:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2357 else
2358 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2359 break;
2360 case 1:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2365 break;
2366 case 2:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2371 break;
2372 case 3:
2373 return 1;
2374 }
2375 gen_op_iwmmxt_movq_wRn_M0(wrd);
2376 gen_op_iwmmxt_set_mup();
2377 gen_op_iwmmxt_set_cup();
2378 break;
2379 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2380 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2381 wrd = (insn >> 12) & 0xf;
2382 rd0 = (insn >> 16) & 0xf;
2383 gen_op_iwmmxt_movq_M0_wRn(rd0);
2384 switch ((insn >> 22) & 3) {
2385 case 0:
2386 if (insn & (1 << 21))
2387 gen_op_iwmmxt_unpacklsb_M0();
2388 else
2389 gen_op_iwmmxt_unpacklub_M0();
2390 break;
2391 case 1:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_unpacklsw_M0();
2394 else
2395 gen_op_iwmmxt_unpackluw_M0();
2396 break;
2397 case 2:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_unpacklsl_M0();
2400 else
2401 gen_op_iwmmxt_unpacklul_M0();
2402 break;
2403 case 3:
2404 return 1;
2405 }
2406 gen_op_iwmmxt_movq_wRn_M0(wrd);
2407 gen_op_iwmmxt_set_mup();
2408 gen_op_iwmmxt_set_cup();
2409 break;
2410 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2411 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2412 wrd = (insn >> 12) & 0xf;
2413 rd0 = (insn >> 16) & 0xf;
2414 gen_op_iwmmxt_movq_M0_wRn(rd0);
2415 switch ((insn >> 22) & 3) {
2416 case 0:
2417 if (insn & (1 << 21))
2418 gen_op_iwmmxt_unpackhsb_M0();
2419 else
2420 gen_op_iwmmxt_unpackhub_M0();
2421 break;
2422 case 1:
2423 if (insn & (1 << 21))
2424 gen_op_iwmmxt_unpackhsw_M0();
2425 else
2426 gen_op_iwmmxt_unpackhuw_M0();
2427 break;
2428 case 2:
2429 if (insn & (1 << 21))
2430 gen_op_iwmmxt_unpackhsl_M0();
2431 else
2432 gen_op_iwmmxt_unpackhul_M0();
2433 break;
2434 case 3:
2435 return 1;
2436 }
2437 gen_op_iwmmxt_movq_wRn_M0(wrd);
2438 gen_op_iwmmxt_set_mup();
2439 gen_op_iwmmxt_set_cup();
2440 break;
2441 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2442 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2443 if (((insn >> 22) & 3) == 0)
2444 return 1;
18c9b560
AZ
2445 wrd = (insn >> 12) & 0xf;
2446 rd0 = (insn >> 16) & 0xf;
2447 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2448 tmp = tcg_temp_new_i32();
da6b5335 2449 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2450 tcg_temp_free_i32(tmp);
18c9b560 2451 return 1;
da6b5335 2452 }
18c9b560 2453 switch ((insn >> 22) & 3) {
18c9b560 2454 case 1:
477955bd 2455 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2456 break;
2457 case 2:
477955bd 2458 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2459 break;
2460 case 3:
477955bd 2461 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2462 break;
2463 }
7d1b0095 2464 tcg_temp_free_i32(tmp);
18c9b560
AZ
2465 gen_op_iwmmxt_movq_wRn_M0(wrd);
2466 gen_op_iwmmxt_set_mup();
2467 gen_op_iwmmxt_set_cup();
2468 break;
2469 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2470 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2471 if (((insn >> 22) & 3) == 0)
2472 return 1;
18c9b560
AZ
2473 wrd = (insn >> 12) & 0xf;
2474 rd0 = (insn >> 16) & 0xf;
2475 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2476 tmp = tcg_temp_new_i32();
da6b5335 2477 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2478 tcg_temp_free_i32(tmp);
18c9b560 2479 return 1;
da6b5335 2480 }
18c9b560 2481 switch ((insn >> 22) & 3) {
18c9b560 2482 case 1:
477955bd 2483 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2484 break;
2485 case 2:
477955bd 2486 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2487 break;
2488 case 3:
477955bd 2489 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2490 break;
2491 }
7d1b0095 2492 tcg_temp_free_i32(tmp);
18c9b560
AZ
2493 gen_op_iwmmxt_movq_wRn_M0(wrd);
2494 gen_op_iwmmxt_set_mup();
2495 gen_op_iwmmxt_set_cup();
2496 break;
2497 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2498 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2499 if (((insn >> 22) & 3) == 0)
2500 return 1;
18c9b560
AZ
2501 wrd = (insn >> 12) & 0xf;
2502 rd0 = (insn >> 16) & 0xf;
2503 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2504 tmp = tcg_temp_new_i32();
da6b5335 2505 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2506 tcg_temp_free_i32(tmp);
18c9b560 2507 return 1;
da6b5335 2508 }
18c9b560 2509 switch ((insn >> 22) & 3) {
18c9b560 2510 case 1:
477955bd 2511 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2512 break;
2513 case 2:
477955bd 2514 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2515 break;
2516 case 3:
477955bd 2517 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2518 break;
2519 }
7d1b0095 2520 tcg_temp_free_i32(tmp);
18c9b560
AZ
2521 gen_op_iwmmxt_movq_wRn_M0(wrd);
2522 gen_op_iwmmxt_set_mup();
2523 gen_op_iwmmxt_set_cup();
2524 break;
2525 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2526 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2527 if (((insn >> 22) & 3) == 0)
2528 return 1;
18c9b560
AZ
2529 wrd = (insn >> 12) & 0xf;
2530 rd0 = (insn >> 16) & 0xf;
2531 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2532 tmp = tcg_temp_new_i32();
18c9b560 2533 switch ((insn >> 22) & 3) {
18c9b560 2534 case 1:
da6b5335 2535 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2536 tcg_temp_free_i32(tmp);
18c9b560 2537 return 1;
da6b5335 2538 }
477955bd 2539 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2540 break;
2541 case 2:
da6b5335 2542 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2543 tcg_temp_free_i32(tmp);
18c9b560 2544 return 1;
da6b5335 2545 }
477955bd 2546 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2547 break;
2548 case 3:
da6b5335 2549 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2550 tcg_temp_free_i32(tmp);
18c9b560 2551 return 1;
da6b5335 2552 }
477955bd 2553 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2554 break;
2555 }
7d1b0095 2556 tcg_temp_free_i32(tmp);
18c9b560
AZ
2557 gen_op_iwmmxt_movq_wRn_M0(wrd);
2558 gen_op_iwmmxt_set_mup();
2559 gen_op_iwmmxt_set_cup();
2560 break;
2561 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2562 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2563 wrd = (insn >> 12) & 0xf;
2564 rd0 = (insn >> 16) & 0xf;
2565 rd1 = (insn >> 0) & 0xf;
2566 gen_op_iwmmxt_movq_M0_wRn(rd0);
2567 switch ((insn >> 22) & 3) {
2568 case 0:
2569 if (insn & (1 << 21))
2570 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2571 else
2572 gen_op_iwmmxt_minub_M0_wRn(rd1);
2573 break;
2574 case 1:
2575 if (insn & (1 << 21))
2576 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2577 else
2578 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2579 break;
2580 case 2:
2581 if (insn & (1 << 21))
2582 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2583 else
2584 gen_op_iwmmxt_minul_M0_wRn(rd1);
2585 break;
2586 case 3:
2587 return 1;
2588 }
2589 gen_op_iwmmxt_movq_wRn_M0(wrd);
2590 gen_op_iwmmxt_set_mup();
2591 break;
2592 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2593 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2594 wrd = (insn >> 12) & 0xf;
2595 rd0 = (insn >> 16) & 0xf;
2596 rd1 = (insn >> 0) & 0xf;
2597 gen_op_iwmmxt_movq_M0_wRn(rd0);
2598 switch ((insn >> 22) & 3) {
2599 case 0:
2600 if (insn & (1 << 21))
2601 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2602 else
2603 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2604 break;
2605 case 1:
2606 if (insn & (1 << 21))
2607 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2608 else
2609 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2610 break;
2611 case 2:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2616 break;
2617 case 3:
2618 return 1;
2619 }
2620 gen_op_iwmmxt_movq_wRn_M0(wrd);
2621 gen_op_iwmmxt_set_mup();
2622 break;
2623 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2624 case 0x402: case 0x502: case 0x602: case 0x702:
2625 wrd = (insn >> 12) & 0xf;
2626 rd0 = (insn >> 16) & 0xf;
2627 rd1 = (insn >> 0) & 0xf;
2628 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2629 tmp = tcg_const_i32((insn >> 20) & 3);
2630 iwmmxt_load_reg(cpu_V1, rd1);
2631 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2632 tcg_temp_free_i32(tmp);
18c9b560
AZ
2633 gen_op_iwmmxt_movq_wRn_M0(wrd);
2634 gen_op_iwmmxt_set_mup();
2635 break;
2636 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2637 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2638 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2639 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2640 wrd = (insn >> 12) & 0xf;
2641 rd0 = (insn >> 16) & 0xf;
2642 rd1 = (insn >> 0) & 0xf;
2643 gen_op_iwmmxt_movq_M0_wRn(rd0);
2644 switch ((insn >> 20) & 0xf) {
2645 case 0x0:
2646 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2647 break;
2648 case 0x1:
2649 gen_op_iwmmxt_subub_M0_wRn(rd1);
2650 break;
2651 case 0x3:
2652 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2653 break;
2654 case 0x4:
2655 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2656 break;
2657 case 0x5:
2658 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2659 break;
2660 case 0x7:
2661 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2662 break;
2663 case 0x8:
2664 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2665 break;
2666 case 0x9:
2667 gen_op_iwmmxt_subul_M0_wRn(rd1);
2668 break;
2669 case 0xb:
2670 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2671 break;
2672 default:
2673 return 1;
2674 }
2675 gen_op_iwmmxt_movq_wRn_M0(wrd);
2676 gen_op_iwmmxt_set_mup();
2677 gen_op_iwmmxt_set_cup();
2678 break;
2679 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2680 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2681 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2682 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2683 wrd = (insn >> 12) & 0xf;
2684 rd0 = (insn >> 16) & 0xf;
2685 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2686 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2687 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2688 tcg_temp_free_i32(tmp);
18c9b560
AZ
2689 gen_op_iwmmxt_movq_wRn_M0(wrd);
2690 gen_op_iwmmxt_set_mup();
2691 gen_op_iwmmxt_set_cup();
2692 break;
2693 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2694 case 0x418: case 0x518: case 0x618: case 0x718:
2695 case 0x818: case 0x918: case 0xa18: case 0xb18:
2696 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2697 wrd = (insn >> 12) & 0xf;
2698 rd0 = (insn >> 16) & 0xf;
2699 rd1 = (insn >> 0) & 0xf;
2700 gen_op_iwmmxt_movq_M0_wRn(rd0);
2701 switch ((insn >> 20) & 0xf) {
2702 case 0x0:
2703 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2704 break;
2705 case 0x1:
2706 gen_op_iwmmxt_addub_M0_wRn(rd1);
2707 break;
2708 case 0x3:
2709 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2710 break;
2711 case 0x4:
2712 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2713 break;
2714 case 0x5:
2715 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2716 break;
2717 case 0x7:
2718 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2719 break;
2720 case 0x8:
2721 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2722 break;
2723 case 0x9:
2724 gen_op_iwmmxt_addul_M0_wRn(rd1);
2725 break;
2726 case 0xb:
2727 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2728 break;
2729 default:
2730 return 1;
2731 }
2732 gen_op_iwmmxt_movq_wRn_M0(wrd);
2733 gen_op_iwmmxt_set_mup();
2734 gen_op_iwmmxt_set_cup();
2735 break;
2736 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2737 case 0x408: case 0x508: case 0x608: case 0x708:
2738 case 0x808: case 0x908: case 0xa08: case 0xb08:
2739 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2740 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2741 return 1;
18c9b560
AZ
2742 wrd = (insn >> 12) & 0xf;
2743 rd0 = (insn >> 16) & 0xf;
2744 rd1 = (insn >> 0) & 0xf;
2745 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2746 switch ((insn >> 22) & 3) {
18c9b560
AZ
2747 case 1:
2748 if (insn & (1 << 21))
2749 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2750 else
2751 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2752 break;
2753 case 2:
2754 if (insn & (1 << 21))
2755 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2756 else
2757 gen_op_iwmmxt_packul_M0_wRn(rd1);
2758 break;
2759 case 3:
2760 if (insn & (1 << 21))
2761 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2762 else
2763 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2764 break;
2765 }
2766 gen_op_iwmmxt_movq_wRn_M0(wrd);
2767 gen_op_iwmmxt_set_mup();
2768 gen_op_iwmmxt_set_cup();
2769 break;
2770 case 0x201: case 0x203: case 0x205: case 0x207:
2771 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2772 case 0x211: case 0x213: case 0x215: case 0x217:
2773 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2774 wrd = (insn >> 5) & 0xf;
2775 rd0 = (insn >> 12) & 0xf;
2776 rd1 = (insn >> 0) & 0xf;
2777 if (rd0 == 0xf || rd1 == 0xf)
2778 return 1;
2779 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2780 tmp = load_reg(s, rd0);
2781 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2782 switch ((insn >> 16) & 0xf) {
2783 case 0x0: /* TMIA */
da6b5335 2784 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2785 break;
2786 case 0x8: /* TMIAPH */
da6b5335 2787 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2788 break;
2789 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2790 if (insn & (1 << 16))
da6b5335 2791 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2792 if (insn & (1 << 17))
da6b5335
FN
2793 tcg_gen_shri_i32(tmp2, tmp2, 16);
2794 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2795 break;
2796 default:
7d1b0095
PM
2797 tcg_temp_free_i32(tmp2);
2798 tcg_temp_free_i32(tmp);
18c9b560
AZ
2799 return 1;
2800 }
7d1b0095
PM
2801 tcg_temp_free_i32(tmp2);
2802 tcg_temp_free_i32(tmp);
18c9b560
AZ
2803 gen_op_iwmmxt_movq_wRn_M0(wrd);
2804 gen_op_iwmmxt_set_mup();
2805 break;
2806 default:
2807 return 1;
2808 }
2809
2810 return 0;
2811}
2812
a1c7273b 2813/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2814 (ie. an undefined instruction). */
7dcc1f89 2815static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2816{
2817 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2818 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2819
2820 if ((insn & 0x0ff00f10) == 0x0e200010) {
2821 /* Multiply with Internal Accumulate Format */
2822 rd0 = (insn >> 12) & 0xf;
2823 rd1 = insn & 0xf;
2824 acc = (insn >> 5) & 7;
2825
2826 if (acc != 0)
2827 return 1;
2828
3a554c0f
FN
2829 tmp = load_reg(s, rd0);
2830 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2831 switch ((insn >> 16) & 0xf) {
2832 case 0x0: /* MIA */
3a554c0f 2833 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2834 break;
2835 case 0x8: /* MIAPH */
3a554c0f 2836 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2837 break;
2838 case 0xc: /* MIABB */
2839 case 0xd: /* MIABT */
2840 case 0xe: /* MIATB */
2841 case 0xf: /* MIATT */
18c9b560 2842 if (insn & (1 << 16))
3a554c0f 2843 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2844 if (insn & (1 << 17))
3a554c0f
FN
2845 tcg_gen_shri_i32(tmp2, tmp2, 16);
2846 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2847 break;
2848 default:
2849 return 1;
2850 }
7d1b0095
PM
2851 tcg_temp_free_i32(tmp2);
2852 tcg_temp_free_i32(tmp);
18c9b560
AZ
2853
2854 gen_op_iwmmxt_movq_wRn_M0(acc);
2855 return 0;
2856 }
2857
2858 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2859 /* Internal Accumulator Access Format */
2860 rdhi = (insn >> 16) & 0xf;
2861 rdlo = (insn >> 12) & 0xf;
2862 acc = insn & 7;
2863
2864 if (acc != 0)
2865 return 1;
2866
2867 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2868 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2869 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2870 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2871 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2872 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2873 } else { /* MAR */
3a554c0f
FN
2874 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2875 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2876 }
2877 return 0;
2878 }
2879
2880 return 1;
2881}
2882
/* Helpers for extracting VFP register numbers from an instruction word.
 * Single-precision registers use a 5-bit number split between a 4-bit
 * field and a "small" extra bit; double-precision registers only have
 * the extra bit when VFP3 (32 double registers) is present, otherwise a
 * set small bit means the insn is UNDEFINED (the macro returns 1 from
 * the enclosing function in that case).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2902
4373f3ce 2903/* Move between integer and VFP cores. */
39d5492a 2904static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2905{
39d5492a 2906 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2907 tcg_gen_mov_i32(tmp, cpu_F0s);
2908 return tmp;
2909}
2910
39d5492a 2911static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2912{
2913 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2914 tcg_temp_free_i32(tmp);
4373f3ce
PB
2915}
2916
39d5492a 2917static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2918{
39d5492a 2919 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2920 if (shift)
2921 tcg_gen_shri_i32(var, var, shift);
86831435 2922 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2923 tcg_gen_shli_i32(tmp, var, 8);
2924 tcg_gen_or_i32(var, var, tmp);
2925 tcg_gen_shli_i32(tmp, var, 16);
2926 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2927 tcg_temp_free_i32(tmp);
ad69471c
PB
2928}
2929
39d5492a 2930static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2931{
39d5492a 2932 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2933 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2934 tcg_gen_shli_i32(tmp, var, 16);
2935 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2936 tcg_temp_free_i32(tmp);
ad69471c
PB
2937}
2938
39d5492a 2939static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2940{
39d5492a 2941 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2942 tcg_gen_andi_i32(var, var, 0xffff0000);
2943 tcg_gen_shri_i32(tmp, var, 16);
2944 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2945 tcg_temp_free_i32(tmp);
ad69471c
PB
2946}
2947
39d5492a 2948static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2949{
2950 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2951 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2952 switch (size) {
2953 case 0:
12dcc321 2954 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2955 gen_neon_dup_u8(tmp, 0);
2956 break;
2957 case 1:
12dcc321 2958 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2959 gen_neon_dup_low16(tmp);
2960 break;
2961 case 2:
12dcc321 2962 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2963 break;
2964 default: /* Avoid compiler warnings. */
2965 abort();
2966 }
2967 return tmp;
2968}
2969
04731fb5
WN
2970static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2971 uint32_t dp)
2972{
2973 uint32_t cc = extract32(insn, 20, 2);
2974
2975 if (dp) {
2976 TCGv_i64 frn, frm, dest;
2977 TCGv_i64 tmp, zero, zf, nf, vf;
2978
2979 zero = tcg_const_i64(0);
2980
2981 frn = tcg_temp_new_i64();
2982 frm = tcg_temp_new_i64();
2983 dest = tcg_temp_new_i64();
2984
2985 zf = tcg_temp_new_i64();
2986 nf = tcg_temp_new_i64();
2987 vf = tcg_temp_new_i64();
2988
2989 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2990 tcg_gen_ext_i32_i64(nf, cpu_NF);
2991 tcg_gen_ext_i32_i64(vf, cpu_VF);
2992
2993 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2994 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2995 switch (cc) {
2996 case 0: /* eq: Z */
2997 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2998 frn, frm);
2999 break;
3000 case 1: /* vs: V */
3001 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3002 frn, frm);
3003 break;
3004 case 2: /* ge: N == V -> N ^ V == 0 */
3005 tmp = tcg_temp_new_i64();
3006 tcg_gen_xor_i64(tmp, vf, nf);
3007 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3008 frn, frm);
3009 tcg_temp_free_i64(tmp);
3010 break;
3011 case 3: /* gt: !Z && N == V */
3012 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3013 frn, frm);
3014 tmp = tcg_temp_new_i64();
3015 tcg_gen_xor_i64(tmp, vf, nf);
3016 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3017 dest, frm);
3018 tcg_temp_free_i64(tmp);
3019 break;
3020 }
3021 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3022 tcg_temp_free_i64(frn);
3023 tcg_temp_free_i64(frm);
3024 tcg_temp_free_i64(dest);
3025
3026 tcg_temp_free_i64(zf);
3027 tcg_temp_free_i64(nf);
3028 tcg_temp_free_i64(vf);
3029
3030 tcg_temp_free_i64(zero);
3031 } else {
3032 TCGv_i32 frn, frm, dest;
3033 TCGv_i32 tmp, zero;
3034
3035 zero = tcg_const_i32(0);
3036
3037 frn = tcg_temp_new_i32();
3038 frm = tcg_temp_new_i32();
3039 dest = tcg_temp_new_i32();
3040 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3041 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3042 switch (cc) {
3043 case 0: /* eq: Z */
3044 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3045 frn, frm);
3046 break;
3047 case 1: /* vs: V */
3048 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3049 frn, frm);
3050 break;
3051 case 2: /* ge: N == V -> N ^ V == 0 */
3052 tmp = tcg_temp_new_i32();
3053 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3054 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3055 frn, frm);
3056 tcg_temp_free_i32(tmp);
3057 break;
3058 case 3: /* gt: !Z && N == V */
3059 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3060 frn, frm);
3061 tmp = tcg_temp_new_i32();
3062 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3063 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3064 dest, frm);
3065 tcg_temp_free_i32(tmp);
3066 break;
3067 }
3068 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3069 tcg_temp_free_i32(frn);
3070 tcg_temp_free_i32(frm);
3071 tcg_temp_free_i32(dest);
3072
3073 tcg_temp_free_i32(zero);
3074 }
3075
3076 return 0;
3077}
3078
40cfacdd
WN
3079static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3080 uint32_t rm, uint32_t dp)
3081{
3082 uint32_t vmin = extract32(insn, 6, 1);
3083 TCGv_ptr fpst = get_fpstatus_ptr(0);
3084
3085 if (dp) {
3086 TCGv_i64 frn, frm, dest;
3087
3088 frn = tcg_temp_new_i64();
3089 frm = tcg_temp_new_i64();
3090 dest = tcg_temp_new_i64();
3091
3092 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3093 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3094 if (vmin) {
f71a2ae5 3095 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3096 } else {
f71a2ae5 3097 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3098 }
3099 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3100 tcg_temp_free_i64(frn);
3101 tcg_temp_free_i64(frm);
3102 tcg_temp_free_i64(dest);
3103 } else {
3104 TCGv_i32 frn, frm, dest;
3105
3106 frn = tcg_temp_new_i32();
3107 frm = tcg_temp_new_i32();
3108 dest = tcg_temp_new_i32();
3109
3110 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3111 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3112 if (vmin) {
f71a2ae5 3113 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3114 } else {
f71a2ae5 3115 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3116 }
3117 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3118 tcg_temp_free_i32(frn);
3119 tcg_temp_free_i32(frm);
3120 tcg_temp_free_i32(dest);
3121 }
3122
3123 tcg_temp_free_ptr(fpst);
3124 return 0;
3125}
3126
7655f39b
WN
3127static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3128 int rounding)
3129{
3130 TCGv_ptr fpst = get_fpstatus_ptr(0);
3131 TCGv_i32 tcg_rmode;
3132
3133 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3134 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3135
3136 if (dp) {
3137 TCGv_i64 tcg_op;
3138 TCGv_i64 tcg_res;
3139 tcg_op = tcg_temp_new_i64();
3140 tcg_res = tcg_temp_new_i64();
3141 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3142 gen_helper_rintd(tcg_res, tcg_op, fpst);
3143 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3144 tcg_temp_free_i64(tcg_op);
3145 tcg_temp_free_i64(tcg_res);
3146 } else {
3147 TCGv_i32 tcg_op;
3148 TCGv_i32 tcg_res;
3149 tcg_op = tcg_temp_new_i32();
3150 tcg_res = tcg_temp_new_i32();
3151 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3152 gen_helper_rints(tcg_res, tcg_op, fpst);
3153 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3154 tcg_temp_free_i32(tcg_op);
3155 tcg_temp_free_i32(tcg_res);
3156 }
3157
3158 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3159 tcg_temp_free_i32(tcg_rmode);
3160
3161 tcg_temp_free_ptr(fpst);
3162 return 0;
3163}
3164
c9975a83
WN
3165static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3166 int rounding)
3167{
3168 bool is_signed = extract32(insn, 7, 1);
3169 TCGv_ptr fpst = get_fpstatus_ptr(0);
3170 TCGv_i32 tcg_rmode, tcg_shift;
3171
3172 tcg_shift = tcg_const_i32(0);
3173
3174 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3175 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3176
3177 if (dp) {
3178 TCGv_i64 tcg_double, tcg_res;
3179 TCGv_i32 tcg_tmp;
3180 /* Rd is encoded as a single precision register even when the source
3181 * is double precision.
3182 */
3183 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3184 tcg_double = tcg_temp_new_i64();
3185 tcg_res = tcg_temp_new_i64();
3186 tcg_tmp = tcg_temp_new_i32();
3187 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3188 if (is_signed) {
3189 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3190 } else {
3191 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3192 }
ecc7b3aa 3193 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3194 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3195 tcg_temp_free_i32(tcg_tmp);
3196 tcg_temp_free_i64(tcg_res);
3197 tcg_temp_free_i64(tcg_double);
3198 } else {
3199 TCGv_i32 tcg_single, tcg_res;
3200 tcg_single = tcg_temp_new_i32();
3201 tcg_res = tcg_temp_new_i32();
3202 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3203 if (is_signed) {
3204 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3205 } else {
3206 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3207 }
3208 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3209 tcg_temp_free_i32(tcg_res);
3210 tcg_temp_free_i32(tcg_single);
3211 }
3212
3213 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3214 tcg_temp_free_i32(tcg_rmode);
3215
3216 tcg_temp_free_i32(tcg_shift);
3217
3218 tcg_temp_free_ptr(fpst);
3219
3220 return 0;
3221}
7655f39b
WN
3222
3223/* Table for converting the most common AArch32 encoding of
3224 * rounding mode to arm_fprounding order (which matches the
3225 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3226 */
3227static const uint8_t fp_decode_rm[] = {
3228 FPROUNDING_TIEAWAY,
3229 FPROUNDING_TIEEVEN,
3230 FPROUNDING_POSINF,
3231 FPROUNDING_NEGINF,
3232};
3233
7dcc1f89 3234static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3235{
3236 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3237
d614a513 3238 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3239 return 1;
3240 }
3241
3242 if (dp) {
3243 VFP_DREG_D(rd, insn);
3244 VFP_DREG_N(rn, insn);
3245 VFP_DREG_M(rm, insn);
3246 } else {
3247 rd = VFP_SREG_D(insn);
3248 rn = VFP_SREG_N(insn);
3249 rm = VFP_SREG_M(insn);
3250 }
3251
3252 if ((insn & 0x0f800e50) == 0x0e000a00) {
3253 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3254 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3255 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3256 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3257 /* VRINTA, VRINTN, VRINTP, VRINTM */
3258 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3259 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3260 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3261 /* VCVTA, VCVTN, VCVTP, VCVTM */
3262 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3263 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3264 }
3265 return 1;
3266}
3267
a1c7273b 3268/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3269 (ie. an undefined instruction). */
7dcc1f89 3270static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3271{
3272 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3273 int dp, veclen;
39d5492a
PM
3274 TCGv_i32 addr;
3275 TCGv_i32 tmp;
3276 TCGv_i32 tmp2;
b7bcbe95 3277
d614a513 3278 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3279 return 1;
d614a513 3280 }
40f137e1 3281
2c7ffc41
PM
3282 /* FIXME: this access check should not take precedence over UNDEF
3283 * for invalid encodings; we will generate incorrect syndrome information
3284 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3285 */
9dbbc748 3286 if (s->fp_excp_el) {
2c7ffc41 3287 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3288 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3289 return 0;
3290 }
3291
5df8bac1 3292 if (!s->vfp_enabled) {
9ee6e8bb 3293 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3294 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3295 return 1;
3296 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3297 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3298 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3299 return 1;
a50c0f51 3300 }
40f137e1 3301 }
6a57f3eb
WN
3302
3303 if (extract32(insn, 28, 4) == 0xf) {
3304 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3305 * only used in v8 and above.
3306 */
7dcc1f89 3307 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3308 }
3309
b7bcbe95
FB
3310 dp = ((insn & 0xf00) == 0xb00);
3311 switch ((insn >> 24) & 0xf) {
3312 case 0xe:
3313 if (insn & (1 << 4)) {
3314 /* single register transfer */
b7bcbe95
FB
3315 rd = (insn >> 12) & 0xf;
3316 if (dp) {
9ee6e8bb
PB
3317 int size;
3318 int pass;
3319
3320 VFP_DREG_N(rn, insn);
3321 if (insn & 0xf)
b7bcbe95 3322 return 1;
9ee6e8bb 3323 if (insn & 0x00c00060
d614a513 3324 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3325 return 1;
d614a513 3326 }
9ee6e8bb
PB
3327
3328 pass = (insn >> 21) & 1;
3329 if (insn & (1 << 22)) {
3330 size = 0;
3331 offset = ((insn >> 5) & 3) * 8;
3332 } else if (insn & (1 << 5)) {
3333 size = 1;
3334 offset = (insn & (1 << 6)) ? 16 : 0;
3335 } else {
3336 size = 2;
3337 offset = 0;
3338 }
18c9b560 3339 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3340 /* vfp->arm */
ad69471c 3341 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3342 switch (size) {
3343 case 0:
9ee6e8bb 3344 if (offset)
ad69471c 3345 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3346 if (insn & (1 << 23))
ad69471c 3347 gen_uxtb(tmp);
9ee6e8bb 3348 else
ad69471c 3349 gen_sxtb(tmp);
9ee6e8bb
PB
3350 break;
3351 case 1:
9ee6e8bb
PB
3352 if (insn & (1 << 23)) {
3353 if (offset) {
ad69471c 3354 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3355 } else {
ad69471c 3356 gen_uxth(tmp);
9ee6e8bb
PB
3357 }
3358 } else {
3359 if (offset) {
ad69471c 3360 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3361 } else {
ad69471c 3362 gen_sxth(tmp);
9ee6e8bb
PB
3363 }
3364 }
3365 break;
3366 case 2:
9ee6e8bb
PB
3367 break;
3368 }
ad69471c 3369 store_reg(s, rd, tmp);
b7bcbe95
FB
3370 } else {
3371 /* arm->vfp */
ad69471c 3372 tmp = load_reg(s, rd);
9ee6e8bb
PB
3373 if (insn & (1 << 23)) {
3374 /* VDUP */
3375 if (size == 0) {
ad69471c 3376 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3377 } else if (size == 1) {
ad69471c 3378 gen_neon_dup_low16(tmp);
9ee6e8bb 3379 }
cbbccffc 3380 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3381 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3382 tcg_gen_mov_i32(tmp2, tmp);
3383 neon_store_reg(rn, n, tmp2);
3384 }
3385 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3386 } else {
3387 /* VMOV */
3388 switch (size) {
3389 case 0:
ad69471c 3390 tmp2 = neon_load_reg(rn, pass);
d593c48e 3391 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3392 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3393 break;
3394 case 1:
ad69471c 3395 tmp2 = neon_load_reg(rn, pass);
d593c48e 3396 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3397 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3398 break;
3399 case 2:
9ee6e8bb
PB
3400 break;
3401 }
ad69471c 3402 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3403 }
b7bcbe95 3404 }
9ee6e8bb
PB
3405 } else { /* !dp */
3406 if ((insn & 0x6f) != 0x00)
3407 return 1;
3408 rn = VFP_SREG_N(insn);
18c9b560 3409 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3410 /* vfp->arm */
3411 if (insn & (1 << 21)) {
3412 /* system register */
40f137e1 3413 rn >>= 1;
9ee6e8bb 3414
b7bcbe95 3415 switch (rn) {
40f137e1 3416 case ARM_VFP_FPSID:
4373f3ce 3417 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3418 VFP3 restricts all id registers to privileged
3419 accesses. */
3420 if (IS_USER(s)
d614a513 3421 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3422 return 1;
d614a513 3423 }
4373f3ce 3424 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3425 break;
40f137e1 3426 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3427 if (IS_USER(s))
3428 return 1;
4373f3ce 3429 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3430 break;
40f137e1
PB
3431 case ARM_VFP_FPINST:
3432 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3433 /* Not present in VFP3. */
3434 if (IS_USER(s)
d614a513 3435 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3436 return 1;
d614a513 3437 }
4373f3ce 3438 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3439 break;
40f137e1 3440 case ARM_VFP_FPSCR:
601d70b9 3441 if (rd == 15) {
4373f3ce
PB
3442 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3443 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3444 } else {
7d1b0095 3445 tmp = tcg_temp_new_i32();
4373f3ce
PB
3446 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3447 }
b7bcbe95 3448 break;
a50c0f51 3449 case ARM_VFP_MVFR2:
d614a513 3450 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3451 return 1;
3452 }
3453 /* fall through */
9ee6e8bb
PB
3454 case ARM_VFP_MVFR0:
3455 case ARM_VFP_MVFR1:
3456 if (IS_USER(s)
d614a513 3457 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3458 return 1;
d614a513 3459 }
4373f3ce 3460 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3461 break;
b7bcbe95
FB
3462 default:
3463 return 1;
3464 }
3465 } else {
3466 gen_mov_F0_vreg(0, rn);
4373f3ce 3467 tmp = gen_vfp_mrs();
b7bcbe95
FB
3468 }
3469 if (rd == 15) {
b5ff1b31 3470 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3471 gen_set_nzcv(tmp);
7d1b0095 3472 tcg_temp_free_i32(tmp);
4373f3ce
PB
3473 } else {
3474 store_reg(s, rd, tmp);
3475 }
b7bcbe95
FB
3476 } else {
3477 /* arm->vfp */
b7bcbe95 3478 if (insn & (1 << 21)) {
40f137e1 3479 rn >>= 1;
b7bcbe95
FB
3480 /* system register */
3481 switch (rn) {
40f137e1 3482 case ARM_VFP_FPSID:
9ee6e8bb
PB
3483 case ARM_VFP_MVFR0:
3484 case ARM_VFP_MVFR1:
b7bcbe95
FB
3485 /* Writes are ignored. */
3486 break;
40f137e1 3487 case ARM_VFP_FPSCR:
e4c1cfa5 3488 tmp = load_reg(s, rd);
4373f3ce 3489 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3490 tcg_temp_free_i32(tmp);
b5ff1b31 3491 gen_lookup_tb(s);
b7bcbe95 3492 break;
40f137e1 3493 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3494 if (IS_USER(s))
3495 return 1;
71b3c3de
JR
3496 /* TODO: VFP subarchitecture support.
3497 * For now, keep the EN bit only */
e4c1cfa5 3498 tmp = load_reg(s, rd);
71b3c3de 3499 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3500 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3501 gen_lookup_tb(s);
3502 break;
3503 case ARM_VFP_FPINST:
3504 case ARM_VFP_FPINST2:
23adb861
PM
3505 if (IS_USER(s)) {
3506 return 1;
3507 }
e4c1cfa5 3508 tmp = load_reg(s, rd);
4373f3ce 3509 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3510 break;
b7bcbe95
FB
3511 default:
3512 return 1;
3513 }
3514 } else {
e4c1cfa5 3515 tmp = load_reg(s, rd);
4373f3ce 3516 gen_vfp_msr(tmp);
b7bcbe95
FB
3517 gen_mov_vreg_F0(0, rn);
3518 }
3519 }
3520 }
3521 } else {
3522 /* data processing */
3523 /* The opcode is in bits 23, 21, 20 and 6. */
3524 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3525 if (dp) {
3526 if (op == 15) {
3527 /* rn is opcode */
3528 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3529 } else {
3530 /* rn is register number */
9ee6e8bb 3531 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3532 }
3533
239c20c7
WN
3534 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3535 ((rn & 0x1e) == 0x6))) {
3536 /* Integer or single/half precision destination. */
9ee6e8bb 3537 rd = VFP_SREG_D(insn);
b7bcbe95 3538 } else {
9ee6e8bb 3539 VFP_DREG_D(rd, insn);
b7bcbe95 3540 }
04595bf6 3541 if (op == 15 &&
239c20c7
WN
3542 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3543 ((rn & 0x1e) == 0x4))) {
3544 /* VCVT from int or half precision is always from S reg
3545 * regardless of dp bit. VCVT with immediate frac_bits
3546 * has same format as SREG_M.
04595bf6
PM
3547 */
3548 rm = VFP_SREG_M(insn);
b7bcbe95 3549 } else {
9ee6e8bb 3550 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3551 }
3552 } else {
9ee6e8bb 3553 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3554 if (op == 15 && rn == 15) {
3555 /* Double precision destination. */
9ee6e8bb
PB
3556 VFP_DREG_D(rd, insn);
3557 } else {
3558 rd = VFP_SREG_D(insn);
3559 }
04595bf6
PM
3560 /* NB that we implicitly rely on the encoding for the frac_bits
3561 * in VCVT of fixed to float being the same as that of an SREG_M
3562 */
9ee6e8bb 3563 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3564 }
3565
69d1fc22 3566 veclen = s->vec_len;
b7bcbe95
FB
3567 if (op == 15 && rn > 3)
3568 veclen = 0;
3569
3570 /* Shut up compiler warnings. */
3571 delta_m = 0;
3572 delta_d = 0;
3573 bank_mask = 0;
3b46e624 3574
b7bcbe95
FB
3575 if (veclen > 0) {
3576 if (dp)
3577 bank_mask = 0xc;
3578 else
3579 bank_mask = 0x18;
3580
3581 /* Figure out what type of vector operation this is. */
3582 if ((rd & bank_mask) == 0) {
3583 /* scalar */
3584 veclen = 0;
3585 } else {
3586 if (dp)
69d1fc22 3587 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3588 else
69d1fc22 3589 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3590
3591 if ((rm & bank_mask) == 0) {
3592 /* mixed scalar/vector */
3593 delta_m = 0;
3594 } else {
3595 /* vector */
3596 delta_m = delta_d;
3597 }
3598 }
3599 }
3600
3601 /* Load the initial operands. */
3602 if (op == 15) {
3603 switch (rn) {
3604 case 16:
3605 case 17:
3606 /* Integer source */
3607 gen_mov_F0_vreg(0, rm);
3608 break;
3609 case 8:
3610 case 9:
3611 /* Compare */
3612 gen_mov_F0_vreg(dp, rd);
3613 gen_mov_F1_vreg(dp, rm);
3614 break;
3615 case 10:
3616 case 11:
3617 /* Compare with zero */
3618 gen_mov_F0_vreg(dp, rd);
3619 gen_vfp_F1_ld0(dp);
3620 break;
9ee6e8bb
PB
3621 case 20:
3622 case 21:
3623 case 22:
3624 case 23:
644ad806
PB
3625 case 28:
3626 case 29:
3627 case 30:
3628 case 31:
9ee6e8bb
PB
3629 /* Source and destination the same. */
3630 gen_mov_F0_vreg(dp, rd);
3631 break;
6e0c0ed1
PM
3632 case 4:
3633 case 5:
3634 case 6:
3635 case 7:
239c20c7
WN
3636 /* VCVTB, VCVTT: only present with the halfprec extension
3637 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3638 * (we choose to UNDEF)
6e0c0ed1 3639 */
d614a513
PM
3640 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3641 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3642 return 1;
3643 }
239c20c7
WN
3644 if (!extract32(rn, 1, 1)) {
3645 /* Half precision source. */
3646 gen_mov_F0_vreg(0, rm);
3647 break;
3648 }
6e0c0ed1 3649 /* Otherwise fall through */
b7bcbe95
FB
3650 default:
3651 /* One source operand. */
3652 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3653 break;
b7bcbe95
FB
3654 }
3655 } else {
3656 /* Two source operands. */
3657 gen_mov_F0_vreg(dp, rn);
3658 gen_mov_F1_vreg(dp, rm);
3659 }
3660
3661 for (;;) {
3662 /* Perform the calculation. */
3663 switch (op) {
605a6aed
PM
3664 case 0: /* VMLA: fd + (fn * fm) */
3665 /* Note that order of inputs to the add matters for NaNs */
3666 gen_vfp_F1_mul(dp);
3667 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3668 gen_vfp_add(dp);
3669 break;
605a6aed 3670 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3671 gen_vfp_mul(dp);
605a6aed
PM
3672 gen_vfp_F1_neg(dp);
3673 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3674 gen_vfp_add(dp);
3675 break;
605a6aed
PM
3676 case 2: /* VNMLS: -fd + (fn * fm) */
3677 /* Note that it isn't valid to replace (-A + B) with (B - A)
3678 * or similar plausible looking simplifications
3679 * because this will give wrong results for NaNs.
3680 */
3681 gen_vfp_F1_mul(dp);
3682 gen_mov_F0_vreg(dp, rd);
3683 gen_vfp_neg(dp);
3684 gen_vfp_add(dp);
b7bcbe95 3685 break;
605a6aed 3686 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3687 gen_vfp_mul(dp);
605a6aed
PM
3688 gen_vfp_F1_neg(dp);
3689 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3690 gen_vfp_neg(dp);
605a6aed 3691 gen_vfp_add(dp);
b7bcbe95
FB
3692 break;
3693 case 4: /* mul: fn * fm */
3694 gen_vfp_mul(dp);
3695 break;
3696 case 5: /* nmul: -(fn * fm) */
3697 gen_vfp_mul(dp);
3698 gen_vfp_neg(dp);
3699 break;
3700 case 6: /* add: fn + fm */
3701 gen_vfp_add(dp);
3702 break;
3703 case 7: /* sub: fn - fm */
3704 gen_vfp_sub(dp);
3705 break;
3706 case 8: /* div: fn / fm */
3707 gen_vfp_div(dp);
3708 break;
da97f52c
PM
3709 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3710 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3711 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3712 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3713 /* These are fused multiply-add, and must be done as one
3714 * floating point operation with no rounding between the
3715 * multiplication and addition steps.
3716 * NB that doing the negations here as separate steps is
3717 * correct : an input NaN should come out with its sign bit
3718 * flipped if it is a negated-input.
3719 */
d614a513 3720 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3721 return 1;
3722 }
3723 if (dp) {
3724 TCGv_ptr fpst;
3725 TCGv_i64 frd;
3726 if (op & 1) {
3727 /* VFNMS, VFMS */
3728 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3729 }
3730 frd = tcg_temp_new_i64();
3731 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3732 if (op & 2) {
3733 /* VFNMA, VFNMS */
3734 gen_helper_vfp_negd(frd, frd);
3735 }
3736 fpst = get_fpstatus_ptr(0);
3737 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3738 cpu_F1d, frd, fpst);
3739 tcg_temp_free_ptr(fpst);
3740 tcg_temp_free_i64(frd);
3741 } else {
3742 TCGv_ptr fpst;
3743 TCGv_i32 frd;
3744 if (op & 1) {
3745 /* VFNMS, VFMS */
3746 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3747 }
3748 frd = tcg_temp_new_i32();
3749 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3750 if (op & 2) {
3751 gen_helper_vfp_negs(frd, frd);
3752 }
3753 fpst = get_fpstatus_ptr(0);
3754 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3755 cpu_F1s, frd, fpst);
3756 tcg_temp_free_ptr(fpst);
3757 tcg_temp_free_i32(frd);
3758 }
3759 break;
9ee6e8bb 3760 case 14: /* fconst */
d614a513
PM
3761 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3762 return 1;
3763 }
9ee6e8bb
PB
3764
3765 n = (insn << 12) & 0x80000000;
3766 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3767 if (dp) {
3768 if (i & 0x40)
3769 i |= 0x3f80;
3770 else
3771 i |= 0x4000;
3772 n |= i << 16;
4373f3ce 3773 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3774 } else {
3775 if (i & 0x40)
3776 i |= 0x780;
3777 else
3778 i |= 0x800;
3779 n |= i << 19;
5b340b51 3780 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3781 }
9ee6e8bb 3782 break;
b7bcbe95
FB
3783 case 15: /* extension space */
3784 switch (rn) {
3785 case 0: /* cpy */
3786 /* no-op */
3787 break;
3788 case 1: /* abs */
3789 gen_vfp_abs(dp);
3790 break;
3791 case 2: /* neg */
3792 gen_vfp_neg(dp);
3793 break;
3794 case 3: /* sqrt */
3795 gen_vfp_sqrt(dp);
3796 break;
239c20c7 3797 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3798 tmp = gen_vfp_mrs();
3799 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3800 if (dp) {
3801 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3802 cpu_env);
3803 } else {
3804 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3805 cpu_env);
3806 }
7d1b0095 3807 tcg_temp_free_i32(tmp);
60011498 3808 break;
239c20c7 3809 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3810 tmp = gen_vfp_mrs();
3811 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3812 if (dp) {
3813 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3814 cpu_env);
3815 } else {
3816 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3817 cpu_env);
3818 }
7d1b0095 3819 tcg_temp_free_i32(tmp);
60011498 3820 break;
239c20c7 3821 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3822 tmp = tcg_temp_new_i32();
239c20c7
WN
3823 if (dp) {
3824 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3825 cpu_env);
3826 } else {
3827 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3828 cpu_env);
3829 }
60011498
PB
3830 gen_mov_F0_vreg(0, rd);
3831 tmp2 = gen_vfp_mrs();
3832 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3833 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3834 tcg_temp_free_i32(tmp2);
60011498
PB
3835 gen_vfp_msr(tmp);
3836 break;
239c20c7 3837 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3838 tmp = tcg_temp_new_i32();
239c20c7
WN
3839 if (dp) {
3840 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3841 cpu_env);
3842 } else {
3843 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3844 cpu_env);
3845 }
60011498
PB
3846 tcg_gen_shli_i32(tmp, tmp, 16);
3847 gen_mov_F0_vreg(0, rd);
3848 tmp2 = gen_vfp_mrs();
3849 tcg_gen_ext16u_i32(tmp2, tmp2);
3850 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3851 tcg_temp_free_i32(tmp2);
60011498
PB
3852 gen_vfp_msr(tmp);
3853 break;
b7bcbe95
FB
3854 case 8: /* cmp */
3855 gen_vfp_cmp(dp);
3856 break;
3857 case 9: /* cmpe */
3858 gen_vfp_cmpe(dp);
3859 break;
3860 case 10: /* cmpz */
3861 gen_vfp_cmp(dp);
3862 break;
3863 case 11: /* cmpez */
3864 gen_vfp_F1_ld0(dp);
3865 gen_vfp_cmpe(dp);
3866 break;
664c6733
WN
3867 case 12: /* vrintr */
3868 {
3869 TCGv_ptr fpst = get_fpstatus_ptr(0);
3870 if (dp) {
3871 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3872 } else {
3873 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3874 }
3875 tcg_temp_free_ptr(fpst);
3876 break;
3877 }
a290c62a
WN
3878 case 13: /* vrintz */
3879 {
3880 TCGv_ptr fpst = get_fpstatus_ptr(0);
3881 TCGv_i32 tcg_rmode;
3882 tcg_rmode = tcg_const_i32(float_round_to_zero);
3883 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3884 if (dp) {
3885 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3886 } else {
3887 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3888 }
3889 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3890 tcg_temp_free_i32(tcg_rmode);
3891 tcg_temp_free_ptr(fpst);
3892 break;
3893 }
4e82bc01
WN
3894 case 14: /* vrintx */
3895 {
3896 TCGv_ptr fpst = get_fpstatus_ptr(0);
3897 if (dp) {
3898 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3899 } else {
3900 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3901 }
3902 tcg_temp_free_ptr(fpst);
3903 break;
3904 }
b7bcbe95
FB
3905 case 15: /* single<->double conversion */
3906 if (dp)
4373f3ce 3907 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3908 else
4373f3ce 3909 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3910 break;
3911 case 16: /* fuito */
5500b06c 3912 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3913 break;
3914 case 17: /* fsito */
5500b06c 3915 gen_vfp_sito(dp, 0);
b7bcbe95 3916 break;
9ee6e8bb 3917 case 20: /* fshto */
d614a513
PM
3918 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3919 return 1;
3920 }
5500b06c 3921 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3922 break;
3923 case 21: /* fslto */
d614a513
PM
3924 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3925 return 1;
3926 }
5500b06c 3927 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3928 break;
3929 case 22: /* fuhto */
d614a513
PM
3930 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3931 return 1;
3932 }
5500b06c 3933 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3934 break;
3935 case 23: /* fulto */
d614a513
PM
3936 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3937 return 1;
3938 }
5500b06c 3939 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3940 break;
b7bcbe95 3941 case 24: /* ftoui */
5500b06c 3942 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3943 break;
3944 case 25: /* ftouiz */
5500b06c 3945 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3946 break;
3947 case 26: /* ftosi */
5500b06c 3948 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3949 break;
3950 case 27: /* ftosiz */
5500b06c 3951 gen_vfp_tosiz(dp, 0);
b7bcbe95 3952 break;
9ee6e8bb 3953 case 28: /* ftosh */
d614a513
PM
3954 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3955 return 1;
3956 }
5500b06c 3957 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3958 break;
3959 case 29: /* ftosl */
d614a513
PM
3960 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3961 return 1;
3962 }
5500b06c 3963 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3964 break;
3965 case 30: /* ftouh */
d614a513
PM
3966 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3967 return 1;
3968 }
5500b06c 3969 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3970 break;
3971 case 31: /* ftoul */
d614a513
PM
3972 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3973 return 1;
3974 }
5500b06c 3975 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3976 break;
b7bcbe95 3977 default: /* undefined */
b7bcbe95
FB
3978 return 1;
3979 }
3980 break;
3981 default: /* undefined */
b7bcbe95
FB
3982 return 1;
3983 }
3984
3985 /* Write back the result. */
239c20c7
WN
3986 if (op == 15 && (rn >= 8 && rn <= 11)) {
3987 /* Comparison, do nothing. */
3988 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3989 (rn & 0x1e) == 0x6)) {
3990 /* VCVT double to int: always integer result.
3991 * VCVT double to half precision is always a single
3992 * precision result.
3993 */
b7bcbe95 3994 gen_mov_vreg_F0(0, rd);
239c20c7 3995 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3996 /* conversion */
3997 gen_mov_vreg_F0(!dp, rd);
239c20c7 3998 } else {
b7bcbe95 3999 gen_mov_vreg_F0(dp, rd);
239c20c7 4000 }
b7bcbe95
FB
4001
4002 /* break out of the loop if we have finished */
4003 if (veclen == 0)
4004 break;
4005
4006 if (op == 15 && delta_m == 0) {
4007 /* single source one-many */
4008 while (veclen--) {
4009 rd = ((rd + delta_d) & (bank_mask - 1))
4010 | (rd & bank_mask);
4011 gen_mov_vreg_F0(dp, rd);
4012 }
4013 break;
4014 }
4015 /* Setup the next operands. */
4016 veclen--;
4017 rd = ((rd + delta_d) & (bank_mask - 1))
4018 | (rd & bank_mask);
4019
4020 if (op == 15) {
4021 /* One source operand. */
4022 rm = ((rm + delta_m) & (bank_mask - 1))
4023 | (rm & bank_mask);
4024 gen_mov_F0_vreg(dp, rm);
4025 } else {
4026 /* Two source operands. */
4027 rn = ((rn + delta_d) & (bank_mask - 1))
4028 | (rn & bank_mask);
4029 gen_mov_F0_vreg(dp, rn);
4030 if (delta_m) {
4031 rm = ((rm + delta_m) & (bank_mask - 1))
4032 | (rm & bank_mask);
4033 gen_mov_F1_vreg(dp, rm);
4034 }
4035 }
4036 }
4037 }
4038 break;
4039 case 0xc:
4040 case 0xd:
8387da81 4041 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4042 /* two-register transfer */
4043 rn = (insn >> 16) & 0xf;
4044 rd = (insn >> 12) & 0xf;
4045 if (dp) {
9ee6e8bb
PB
4046 VFP_DREG_M(rm, insn);
4047 } else {
4048 rm = VFP_SREG_M(insn);
4049 }
b7bcbe95 4050
18c9b560 4051 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4052 /* vfp->arm */
4053 if (dp) {
4373f3ce
PB
4054 gen_mov_F0_vreg(0, rm * 2);
4055 tmp = gen_vfp_mrs();
4056 store_reg(s, rd, tmp);
4057 gen_mov_F0_vreg(0, rm * 2 + 1);
4058 tmp = gen_vfp_mrs();
4059 store_reg(s, rn, tmp);
b7bcbe95
FB
4060 } else {
4061 gen_mov_F0_vreg(0, rm);
4373f3ce 4062 tmp = gen_vfp_mrs();
8387da81 4063 store_reg(s, rd, tmp);
b7bcbe95 4064 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4065 tmp = gen_vfp_mrs();
8387da81 4066 store_reg(s, rn, tmp);
b7bcbe95
FB
4067 }
4068 } else {
4069 /* arm->vfp */
4070 if (dp) {
4373f3ce
PB
4071 tmp = load_reg(s, rd);
4072 gen_vfp_msr(tmp);
4073 gen_mov_vreg_F0(0, rm * 2);
4074 tmp = load_reg(s, rn);
4075 gen_vfp_msr(tmp);
4076 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4077 } else {
8387da81 4078 tmp = load_reg(s, rd);
4373f3ce 4079 gen_vfp_msr(tmp);
b7bcbe95 4080 gen_mov_vreg_F0(0, rm);
8387da81 4081 tmp = load_reg(s, rn);
4373f3ce 4082 gen_vfp_msr(tmp);
b7bcbe95
FB
4083 gen_mov_vreg_F0(0, rm + 1);
4084 }
4085 }
4086 } else {
4087 /* Load/store */
4088 rn = (insn >> 16) & 0xf;
4089 if (dp)
9ee6e8bb 4090 VFP_DREG_D(rd, insn);
b7bcbe95 4091 else
9ee6e8bb 4092 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4093 if ((insn & 0x01200000) == 0x01000000) {
4094 /* Single load/store */
4095 offset = (insn & 0xff) << 2;
4096 if ((insn & (1 << 23)) == 0)
4097 offset = -offset;
934814f1
PM
4098 if (s->thumb && rn == 15) {
4099 /* This is actually UNPREDICTABLE */
4100 addr = tcg_temp_new_i32();
4101 tcg_gen_movi_i32(addr, s->pc & ~2);
4102 } else {
4103 addr = load_reg(s, rn);
4104 }
312eea9f 4105 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4106 if (insn & (1 << 20)) {
312eea9f 4107 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4108 gen_mov_vreg_F0(dp, rd);
4109 } else {
4110 gen_mov_F0_vreg(dp, rd);
312eea9f 4111 gen_vfp_st(s, dp, addr);
b7bcbe95 4112 }
7d1b0095 4113 tcg_temp_free_i32(addr);
b7bcbe95
FB
4114 } else {
4115 /* load/store multiple */
934814f1 4116 int w = insn & (1 << 21);
b7bcbe95
FB
4117 if (dp)
4118 n = (insn >> 1) & 0x7f;
4119 else
4120 n = insn & 0xff;
4121
934814f1
PM
4122 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4123 /* P == U , W == 1 => UNDEF */
4124 return 1;
4125 }
4126 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4127 /* UNPREDICTABLE cases for bad immediates: we choose to
4128 * UNDEF to avoid generating huge numbers of TCG ops
4129 */
4130 return 1;
4131 }
4132 if (rn == 15 && w) {
4133 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4134 return 1;
4135 }
4136
4137 if (s->thumb && rn == 15) {
4138 /* This is actually UNPREDICTABLE */
4139 addr = tcg_temp_new_i32();
4140 tcg_gen_movi_i32(addr, s->pc & ~2);
4141 } else {
4142 addr = load_reg(s, rn);
4143 }
b7bcbe95 4144 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4145 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4146
4147 if (dp)
4148 offset = 8;
4149 else
4150 offset = 4;
4151 for (i = 0; i < n; i++) {
18c9b560 4152 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4153 /* load */
312eea9f 4154 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4155 gen_mov_vreg_F0(dp, rd + i);
4156 } else {
4157 /* store */
4158 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4159 gen_vfp_st(s, dp, addr);
b7bcbe95 4160 }
312eea9f 4161 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4162 }
934814f1 4163 if (w) {
b7bcbe95
FB
4164 /* writeback */
4165 if (insn & (1 << 24))
4166 offset = -offset * n;
4167 else if (dp && (insn & 1))
4168 offset = 4;
4169 else
4170 offset = 0;
4171
4172 if (offset != 0)
312eea9f
FN
4173 tcg_gen_addi_i32(addr, addr, offset);
4174 store_reg(s, rn, addr);
4175 } else {
7d1b0095 4176 tcg_temp_free_i32(addr);
b7bcbe95
FB
4177 }
4178 }
4179 }
4180 break;
4181 default:
4182 /* Should never happen. */
4183 return 1;
4184 }
4185 return 0;
4186}
4187
/* Return true if a direct (chained) tcg_gen_goto_tb exit may be used for
 * a jump to 'dest'.  Direct chaining is only safe when the destination
 * lies on the same guest page as either the TB start or the current
 * instruction; otherwise we must go through the slow lookup path.
 * User-mode emulation has no page-protection hazard, so always allow it.
 */
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
6e256c93 4197
/* Emit an indirect TB exit: look the destination PC up in the TB hash
 * and jump straight to it if found, avoiding a return to the main loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4202
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 *
 * 'n' selects which of the TB's two chain slots to use for the
 * direct-jump case; 'dest' is the guest PC to continue at.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: patchable jump, then set PC, then exit with
         * the (tb, slot) cookie so the main loop can link the TBs.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Cross-page: set PC and take the hash-lookup exit instead. */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4219
/* Emit an unconditional jump to 'dest'.  When single-stepping we must
 * use an indirect branch (gen_bx_im) so the debug exception machinery
 * still fires; otherwise take the fast goto_tb path.
 */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;          /* keep the Thumb bit set in the target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
4231
39d5492a 4232static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4233{
ee097184 4234 if (x)
d9ba4830 4235 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4236 else
d9ba4830 4237 gen_sxth(t0);
ee097184 4238 if (y)
d9ba4830 4239 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4240 else
d9ba4830
PB
4241 gen_sxth(t1);
4242 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4243}
4244
/* Return the mask of PSR bits set by a MSR instruction.
 * 'flags' holds the instruction's four byte-enable bits (c/x/s/f fields),
 * each selecting one byte of the PSR; 'spsr' is nonzero when the target
 * is an SPSR rather than the CPSR.  Bits not writable on the emulated
 * architecture version, and privileged bits in user mode, are removed.
 */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask;

    mask = 0;
    if (flags & (1 << 0))
        mask |= 0xff;           /* CPSR_c: bits [7:0] */
    if (flags & (1 << 1))
        mask |= 0xff00;         /* CPSR_x: bits [15:8] */
    if (flags & (1 << 2))
        mask |= 0xff0000;       /* CPSR_s: bits [23:16] */
    if (flags & (1 << 3))
        mask |= 0xff000000;     /* CPSR_f: bits [31:24] */

    /* Mask out undefined bits.  */
    mask &= ~CPSR_RESERVED;
    if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
        mask &= ~CPSR_T;        /* no Thumb bit before v4T */
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
        mask &= ~CPSR_Q; /* V5TE in reality*/
    }
    if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
        mask &= ~(CPSR_E | CPSR_GE);
    }
    if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
        mask &= ~CPSR_IT;
    }
    /* Mask out execution state and reserved bits.  */
    if (!spsr) {
        mask &= ~(CPSR_EXEC | CPSR_RESERVED);
    }
    /* Mask out privileged bits.  */
    if (IS_USER(s))
        mask &= CPSR_USER;
    return mask;
}
4283
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * Writes the bits of t0 selected by 'mask' into either the SPSR (when
 * 'spsr' is nonzero) or the CPSR.  Ends the TB via gen_lookup_tb() since
 * a PSR write may change execution state.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep unmasked SPSR bits, insert masked t0. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper (may change mode etc). */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
4305
2fbac54b
FN
4306/* Returns nonzero if access to the PSR is not permitted. */
4307static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4308{
39d5492a 4309 TCGv_i32 tmp;
7d1b0095 4310 tmp = tcg_temp_new_i32();
2fbac54b
FN
4311 tcg_gen_movi_i32(tmp, val);
4312 return gen_set_psr(s, mask, spsr, tmp);
4313}
4314
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4459
/* Emit code for MSR (banked): move general register rn into the banked
 * register identified by (r, sysm).  Undefined/unpredictable encodings
 * are handled by msr_banked_access_decode(), which emits the exception.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* The helper may have changed mode state; force a main-loop return. */
    s->base.is_jmp = DISAS_UPDATE;
}
4481
/* Emit code for MRS (banked): read the banked register identified by
 * (r, sysm) into general register rn.  Mirrors gen_msr_banked().
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
4503
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 * Marks 'pc' as dead.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4513
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4527
/* Generate an old-style exception return. Marks pc as dead.
 * The new CPSR is taken from the SPSR of the current mode.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4533
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
    default: /* nop */
        break;
    }
}
99c475ab 4573
/* Convenience argument triple for two-operand 64-bit Neon ops where the
 * destination is also the first source.
 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1

/* Emit a Neon integer add of t0 += t1 for element size 'size'
 * (0 = 8-bit lanes, 1 = 16-bit lanes, 2 = plain 32-bit add).
 */
static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
4585
/* Emit a Neon reverse subtract, t0 = t1 - t0, for element size 'size'
 * (0 = 8-bit lanes, 1 = 16-bit lanes, 2 = plain 32-bit subtract).
 * Other sizes are silently ignored.
 */
static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
    default: return;
    }
}

/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4601
/* Dispatch a Neon integer helper that takes cpu_env, selecting the
 * signed/unsigned 8/16/32-bit variant from the enclosing scope's
 * 'size' and 'u' variables; operates on tmp/tmp2, result in tmp.
 * Expands to 'return 1' (insn UNDEFs) for unhandled size/u combinations.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4624
4625#define GEN_NEON_INTEGER_OP(name) do { \
4626 switch ((size << 1) | u) { \
ad69471c 4627 case 0: \
dd8fbd78 4628 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4629 break; \
4630 case 1: \
dd8fbd78 4631 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4632 break; \
4633 case 2: \
dd8fbd78 4634 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4635 break; \
4636 case 3: \
dd8fbd78 4637 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4638 break; \
4639 case 4: \
dd8fbd78 4640 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4641 break; \
4642 case 5: \
dd8fbd78 4643 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4644 break; \
9ee6e8bb
PB
4645 default: return 1; \
4646 }} while (0)
4647
39d5492a 4648static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4649{
39d5492a 4650 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4651 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4652 return tmp;
9ee6e8bb
PB
4653}
4654
/* Store 'var' into Neon scratch slot 'scratch' in CPUARMState.
 * Marks 'var' as dead.
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4660
/* Load a Neon scalar operand and replicate it across a 32-bit value.
 * For size == 1 (16-bit scalar) the selected halfword is duplicated into
 * both halves; for 32-bit scalars the register word is returned as-is.
 * The scalar index is packed into 'reg' along with the register number.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);   /* scalar is the high halfword */
        } else {
            gen_neon_dup_low16(tmp);    /* scalar is the low halfword */
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4676
/* Emit a call to the VUZP (unzip/de-interleave) helper for registers
 * rd/rm with element size 'size'; 'q' selects the quad-register form.
 * Returns 1 (UNDEF) for the unsupported doubleword 32-bit-element case.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the register numbers as TCG constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4715
/* Emit a call to the VZIP (zip/interleave) helper for registers rd/rm
 * with element size 'size'; 'q' selects the quad-register form.
 * Returns 1 (UNDEF) for the unsupported doubleword 32-bit-element case.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the register numbers as TCG constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4754
/* Emit inline code for a VTRN.8 on a 32-bit word pair: exchange the odd
 * bytes of t0 with the even bytes of t1, in place.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8, keeping byte lanes 1 and 3) | even bytes of t1 */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8, keeping byte lanes 0 and 2) | odd bytes of t0 */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4776
/* Emit inline code for a VTRN.16 on a 32-bit word pair: exchange the
 * high halfword of t0 with the low halfword of t1, in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = low half of t1 : low half of t0 */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = high half of t0 : high half of t1 */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4796
/* Decode table for the Neon "load/store multiple structures" forms,
 * indexed by the op field (insn bits [11:8]); ops > 10 are invalid
 * (checked by the caller, disas_neon_ls_insn).
 */
static struct {
    int nregs;       /* number of D registers transferred */
    int interleave;  /* element interleave factor */
    int spacing;     /* register number spacing between transfers */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4814
4815/* Translate a NEON load/store element instruction. Return nonzero if the
4816 instruction is invalid. */
7dcc1f89 4817static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4818{
4819 int rd, rn, rm;
4820 int op;
4821 int nregs;
4822 int interleave;
84496233 4823 int spacing;
9ee6e8bb
PB
4824 int stride;
4825 int size;
4826 int reg;
4827 int pass;
4828 int load;
4829 int shift;
9ee6e8bb 4830 int n;
39d5492a
PM
4831 TCGv_i32 addr;
4832 TCGv_i32 tmp;
4833 TCGv_i32 tmp2;
84496233 4834 TCGv_i64 tmp64;
9ee6e8bb 4835
2c7ffc41
PM
4836 /* FIXME: this access check should not take precedence over UNDEF
4837 * for invalid encodings; we will generate incorrect syndrome information
4838 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4839 */
9dbbc748 4840 if (s->fp_excp_el) {
2c7ffc41 4841 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4842 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4843 return 0;
4844 }
4845
5df8bac1 4846 if (!s->vfp_enabled)
9ee6e8bb
PB
4847 return 1;
4848 VFP_DREG_D(rd, insn);
4849 rn = (insn >> 16) & 0xf;
4850 rm = insn & 0xf;
4851 load = (insn & (1 << 21)) != 0;
4852 if ((insn & (1 << 23)) == 0) {
4853 /* Load store all elements. */
4854 op = (insn >> 8) & 0xf;
4855 size = (insn >> 6) & 3;
84496233 4856 if (op > 10)
9ee6e8bb 4857 return 1;
f2dd89d0
PM
4858 /* Catch UNDEF cases for bad values of align field */
4859 switch (op & 0xc) {
4860 case 4:
4861 if (((insn >> 5) & 1) == 1) {
4862 return 1;
4863 }
4864 break;
4865 case 8:
4866 if (((insn >> 4) & 3) == 3) {
4867 return 1;
4868 }
4869 break;
4870 default:
4871 break;
4872 }
9ee6e8bb
PB
4873 nregs = neon_ls_element_type[op].nregs;
4874 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4875 spacing = neon_ls_element_type[op].spacing;
4876 if (size == 3 && (interleave | spacing) != 1)
4877 return 1;
e318a60b 4878 addr = tcg_temp_new_i32();
dcc65026 4879 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4880 stride = (1 << size) * interleave;
4881 for (reg = 0; reg < nregs; reg++) {
4882 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4883 load_reg_var(s, addr, rn);
4884 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4885 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4886 load_reg_var(s, addr, rn);
4887 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4888 }
84496233 4889 if (size == 3) {
8ed1237d 4890 tmp64 = tcg_temp_new_i64();
84496233 4891 if (load) {
12dcc321 4892 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4893 neon_store_reg64(tmp64, rd);
84496233 4894 } else {
84496233 4895 neon_load_reg64(tmp64, rd);
12dcc321 4896 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4897 }
8ed1237d 4898 tcg_temp_free_i64(tmp64);
84496233
JR
4899 tcg_gen_addi_i32(addr, addr, stride);
4900 } else {
4901 for (pass = 0; pass < 2; pass++) {
4902 if (size == 2) {
4903 if (load) {
58ab8e96 4904 tmp = tcg_temp_new_i32();
12dcc321 4905 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4906 neon_store_reg(rd, pass, tmp);
4907 } else {
4908 tmp = neon_load_reg(rd, pass);
12dcc321 4909 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4910 tcg_temp_free_i32(tmp);
84496233 4911 }
1b2b1e54 4912 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4913 } else if (size == 1) {
4914 if (load) {
58ab8e96 4915 tmp = tcg_temp_new_i32();
12dcc321 4916 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4917 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4918 tmp2 = tcg_temp_new_i32();
12dcc321 4919 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4920 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4921 tcg_gen_shli_i32(tmp2, tmp2, 16);
4922 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4923 tcg_temp_free_i32(tmp2);
84496233
JR
4924 neon_store_reg(rd, pass, tmp);
4925 } else {
4926 tmp = neon_load_reg(rd, pass);
7d1b0095 4927 tmp2 = tcg_temp_new_i32();
84496233 4928 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 4929 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 4930 tcg_temp_free_i32(tmp);
84496233 4931 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 4932 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 4933 tcg_temp_free_i32(tmp2);
1b2b1e54 4934 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4935 }
84496233
JR
4936 } else /* size == 0 */ {
4937 if (load) {
39d5492a 4938 TCGV_UNUSED_I32(tmp2);
84496233 4939 for (n = 0; n < 4; n++) {
58ab8e96 4940 tmp = tcg_temp_new_i32();
12dcc321 4941 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
4942 tcg_gen_addi_i32(addr, addr, stride);
4943 if (n == 0) {
4944 tmp2 = tmp;
4945 } else {
41ba8341
PB
4946 tcg_gen_shli_i32(tmp, tmp, n * 8);
4947 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4948 tcg_temp_free_i32(tmp);
84496233 4949 }
9ee6e8bb 4950 }
84496233
JR
4951 neon_store_reg(rd, pass, tmp2);
4952 } else {
4953 tmp2 = neon_load_reg(rd, pass);
4954 for (n = 0; n < 4; n++) {
7d1b0095 4955 tmp = tcg_temp_new_i32();
84496233
JR
4956 if (n == 0) {
4957 tcg_gen_mov_i32(tmp, tmp2);
4958 } else {
4959 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4960 }
12dcc321 4961 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 4962 tcg_temp_free_i32(tmp);
84496233
JR
4963 tcg_gen_addi_i32(addr, addr, stride);
4964 }
7d1b0095 4965 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4966 }
4967 }
4968 }
4969 }
84496233 4970 rd += spacing;
9ee6e8bb 4971 }
e318a60b 4972 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4973 stride = nregs * 8;
4974 } else {
4975 size = (insn >> 10) & 3;
4976 if (size == 3) {
4977 /* Load single element to all lanes. */
8e18cde3
PM
4978 int a = (insn >> 4) & 1;
4979 if (!load) {
9ee6e8bb 4980 return 1;
8e18cde3 4981 }
9ee6e8bb
PB
4982 size = (insn >> 6) & 3;
4983 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4984
4985 if (size == 3) {
4986 if (nregs != 4 || a == 0) {
9ee6e8bb 4987 return 1;
99c475ab 4988 }
8e18cde3
PM
4989 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
4990 size = 2;
4991 }
4992 if (nregs == 1 && a == 1 && size == 0) {
4993 return 1;
4994 }
4995 if (nregs == 3 && a == 1) {
4996 return 1;
4997 }
e318a60b 4998 addr = tcg_temp_new_i32();
8e18cde3
PM
4999 load_reg_var(s, addr, rn);
5000 if (nregs == 1) {
5001 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5002 tmp = gen_load_and_replicate(s, addr, size);
5003 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5004 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5005 if (insn & (1 << 5)) {
5006 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5008 }
5009 tcg_temp_free_i32(tmp);
5010 } else {
5011 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5012 stride = (insn & (1 << 5)) ? 2 : 1;
5013 for (reg = 0; reg < nregs; reg++) {
5014 tmp = gen_load_and_replicate(s, addr, size);
5015 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5016 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5017 tcg_temp_free_i32(tmp);
5018 tcg_gen_addi_i32(addr, addr, 1 << size);
5019 rd += stride;
5020 }
9ee6e8bb 5021 }
e318a60b 5022 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5023 stride = (1 << size) * nregs;
5024 } else {
5025 /* Single element. */
93262b16 5026 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5027 pass = (insn >> 7) & 1;
5028 switch (size) {
5029 case 0:
5030 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5031 stride = 1;
5032 break;
5033 case 1:
5034 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5035 stride = (insn & (1 << 5)) ? 2 : 1;
5036 break;
5037 case 2:
5038 shift = 0;
9ee6e8bb
PB
5039 stride = (insn & (1 << 6)) ? 2 : 1;
5040 break;
5041 default:
5042 abort();
5043 }
5044 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5045 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5046 switch (nregs) {
5047 case 1:
5048 if (((idx & (1 << size)) != 0) ||
5049 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5050 return 1;
5051 }
5052 break;
5053 case 3:
5054 if ((idx & 1) != 0) {
5055 return 1;
5056 }
5057 /* fall through */
5058 case 2:
5059 if (size == 2 && (idx & 2) != 0) {
5060 return 1;
5061 }
5062 break;
5063 case 4:
5064 if ((size == 2) && ((idx & 3) == 3)) {
5065 return 1;
5066 }
5067 break;
5068 default:
5069 abort();
5070 }
5071 if ((rd + stride * (nregs - 1)) > 31) {
5072 /* Attempts to write off the end of the register file
5073 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5074 * the neon_load_reg() would write off the end of the array.
5075 */
5076 return 1;
5077 }
e318a60b 5078 addr = tcg_temp_new_i32();
dcc65026 5079 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5080 for (reg = 0; reg < nregs; reg++) {
5081 if (load) {
58ab8e96 5082 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5083 switch (size) {
5084 case 0:
12dcc321 5085 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5086 break;
5087 case 1:
12dcc321 5088 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5089 break;
5090 case 2:
12dcc321 5091 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5092 break;
a50f5b91
PB
5093 default: /* Avoid compiler warnings. */
5094 abort();
9ee6e8bb
PB
5095 }
5096 if (size != 2) {
8f8e3aa4 5097 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5098 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5099 shift, size ? 16 : 8);
7d1b0095 5100 tcg_temp_free_i32(tmp2);
9ee6e8bb 5101 }
8f8e3aa4 5102 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5103 } else { /* Store */
8f8e3aa4
PB
5104 tmp = neon_load_reg(rd, pass);
5105 if (shift)
5106 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5107 switch (size) {
5108 case 0:
12dcc321 5109 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5110 break;
5111 case 1:
12dcc321 5112 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5113 break;
5114 case 2:
12dcc321 5115 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5116 break;
99c475ab 5117 }
58ab8e96 5118 tcg_temp_free_i32(tmp);
99c475ab 5119 }
9ee6e8bb 5120 rd += stride;
1b2b1e54 5121 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5122 }
e318a60b 5123 tcg_temp_free_i32(addr);
9ee6e8bb 5124 stride = nregs * (1 << size);
99c475ab 5125 }
9ee6e8bb
PB
5126 }
5127 if (rm != 15) {
39d5492a 5128 TCGv_i32 base;
b26eefb6
PB
5129
5130 base = load_reg(s, rn);
9ee6e8bb 5131 if (rm == 13) {
b26eefb6 5132 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5133 } else {
39d5492a 5134 TCGv_i32 index;
b26eefb6
PB
5135 index = load_reg(s, rm);
5136 tcg_gen_add_i32(base, base, index);
7d1b0095 5137 tcg_temp_free_i32(index);
9ee6e8bb 5138 }
b26eefb6 5139 store_reg(s, rn, base);
9ee6e8bb
PB
5140 }
5141 return 0;
5142}
3b46e624 5143
8f8e3aa4 5144/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5145static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5146{
5147 tcg_gen_and_i32(t, t, c);
f669df27 5148 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5149 tcg_gen_or_i32(dest, t, f);
5150}
5151
39d5492a 5152static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5153{
5154 switch (size) {
5155 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5156 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5157 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5158 default: abort();
5159 }
5160}
5161
39d5492a 5162static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5163{
5164 switch (size) {
02da0b2d
PM
5165 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5166 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5167 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5168 default: abort();
5169 }
5170}
5171
39d5492a 5172static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5173{
5174 switch (size) {
02da0b2d
PM
5175 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5176 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5177 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5178 default: abort();
5179 }
5180}
5181
39d5492a 5182static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5183{
5184 switch (size) {
02da0b2d
PM
5185 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5186 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5187 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5188 default: abort();
5189 }
5190}
5191
39d5492a 5192static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5193 int q, int u)
5194{
5195 if (q) {
5196 if (u) {
5197 switch (size) {
5198 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5199 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5200 default: abort();
5201 }
5202 } else {
5203 switch (size) {
5204 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5205 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5206 default: abort();
5207 }
5208 }
5209 } else {
5210 if (u) {
5211 switch (size) {
b408a9b0
CL
5212 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5213 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5214 default: abort();
5215 }
5216 } else {
5217 switch (size) {
5218 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5219 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5220 default: abort();
5221 }
5222 }
5223 }
5224}
5225
39d5492a 5226static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5227{
5228 if (u) {
5229 switch (size) {
5230 case 0: gen_helper_neon_widen_u8(dest, src); break;
5231 case 1: gen_helper_neon_widen_u16(dest, src); break;
5232 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5233 default: abort();
5234 }
5235 } else {
5236 switch (size) {
5237 case 0: gen_helper_neon_widen_s8(dest, src); break;
5238 case 1: gen_helper_neon_widen_s16(dest, src); break;
5239 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5240 default: abort();
5241 }
5242 }
7d1b0095 5243 tcg_temp_free_i32(src);
ad69471c
PB
5244}
5245
5246static inline void gen_neon_addl(int size)
5247{
5248 switch (size) {
5249 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5250 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5251 case 2: tcg_gen_add_i64(CPU_V001); break;
5252 default: abort();
5253 }
5254}
5255
5256static inline void gen_neon_subl(int size)
5257{
5258 switch (size) {
5259 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5260 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5261 case 2: tcg_gen_sub_i64(CPU_V001); break;
5262 default: abort();
5263 }
5264}
5265
a7812ae4 5266static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5267{
5268 switch (size) {
5269 case 0: gen_helper_neon_negl_u16(var, var); break;
5270 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5271 case 2:
5272 tcg_gen_neg_i64(var, var);
5273 break;
ad69471c
PB
5274 default: abort();
5275 }
5276}
5277
a7812ae4 5278static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5279{
5280 switch (size) {
02da0b2d
PM
5281 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5282 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5283 default: abort();
5284 }
5285}
5286
39d5492a
PM
5287static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5288 int size, int u)
ad69471c 5289{
a7812ae4 5290 TCGv_i64 tmp;
ad69471c
PB
5291
5292 switch ((size << 1) | u) {
5293 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5294 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5295 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5296 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5297 case 4:
5298 tmp = gen_muls_i64_i32(a, b);
5299 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5300 tcg_temp_free_i64(tmp);
ad69471c
PB
5301 break;
5302 case 5:
5303 tmp = gen_mulu_i64_i32(a, b);
5304 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5305 tcg_temp_free_i64(tmp);
ad69471c
PB
5306 break;
5307 default: abort();
5308 }
c6067f04
CL
5309
5310 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5311 Don't forget to clean them now. */
5312 if (size < 2) {
7d1b0095
PM
5313 tcg_temp_free_i32(a);
5314 tcg_temp_free_i32(b);
c6067f04 5315 }
ad69471c
PB
5316}
5317
39d5492a
PM
5318static void gen_neon_narrow_op(int op, int u, int size,
5319 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5320{
5321 if (op) {
5322 if (u) {
5323 gen_neon_unarrow_sats(size, dest, src);
5324 } else {
5325 gen_neon_narrow(size, dest, src);
5326 }
5327 } else {
5328 if (u) {
5329 gen_neon_narrow_satu(size, dest, src);
5330 } else {
5331 gen_neon_narrow_sats(size, dest, src);
5332 }
5333 }
5334}
5335
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of each entry is set if the op accepts size value n; ops whose
 * size field instead encodes an operation type say so in a comment.
 * Unallocated op values have no bits set and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5407
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5474
5475static int neon_2rm_is_float_op(int op)
5476{
5477 /* Return true if this neon 2reg-misc op is float-to-float */
5478 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5479 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5480 op == NEON_2RM_VRINTM ||
5481 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5482 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5483}
5484
fe8fcf3d
PM
5485static bool neon_2rm_is_v8_op(int op)
5486{
5487 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5488 switch (op) {
5489 case NEON_2RM_VRINTN:
5490 case NEON_2RM_VRINTA:
5491 case NEON_2RM_VRINTM:
5492 case NEON_2RM_VRINTP:
5493 case NEON_2RM_VRINTZ:
5494 case NEON_2RM_VRINTX:
5495 case NEON_2RM_VCVTAU:
5496 case NEON_2RM_VCVTAS:
5497 case NEON_2RM_VCVTNU:
5498 case NEON_2RM_VCVTNS:
5499 case NEON_2RM_VCVTPU:
5500 case NEON_2RM_VCVTPS:
5501 case NEON_2RM_VCVTMU:
5502 case NEON_2RM_VCVTMS:
5503 return true;
5504 default:
5505 return false;
5506 }
5507}
5508
600b828c
PM
5509/* Each entry in this array has bit n set if the insn allows
5510 * size value n (otherwise it will UNDEF). Since unallocated
5511 * op values will have no bits set they always UNDEF.
5512 */
5513static const uint8_t neon_2rm_sizes[] = {
5514 [NEON_2RM_VREV64] = 0x7,
5515 [NEON_2RM_VREV32] = 0x3,
5516 [NEON_2RM_VREV16] = 0x1,
5517 [NEON_2RM_VPADDL] = 0x7,
5518 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5519 [NEON_2RM_AESE] = 0x1,
5520 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5521 [NEON_2RM_VCLS] = 0x7,
5522 [NEON_2RM_VCLZ] = 0x7,
5523 [NEON_2RM_VCNT] = 0x1,
5524 [NEON_2RM_VMVN] = 0x1,
5525 [NEON_2RM_VPADAL] = 0x7,
5526 [NEON_2RM_VPADAL_U] = 0x7,
5527 [NEON_2RM_VQABS] = 0x7,
5528 [NEON_2RM_VQNEG] = 0x7,
5529 [NEON_2RM_VCGT0] = 0x7,
5530 [NEON_2RM_VCGE0] = 0x7,
5531 [NEON_2RM_VCEQ0] = 0x7,
5532 [NEON_2RM_VCLE0] = 0x7,
5533 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5534 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5535 [NEON_2RM_VABS] = 0x7,
5536 [NEON_2RM_VNEG] = 0x7,
5537 [NEON_2RM_VCGT0_F] = 0x4,
5538 [NEON_2RM_VCGE0_F] = 0x4,
5539 [NEON_2RM_VCEQ0_F] = 0x4,
5540 [NEON_2RM_VCLE0_F] = 0x4,
5541 [NEON_2RM_VCLT0_F] = 0x4,
5542 [NEON_2RM_VABS_F] = 0x4,
5543 [NEON_2RM_VNEG_F] = 0x4,
5544 [NEON_2RM_VSWP] = 0x1,
5545 [NEON_2RM_VTRN] = 0x7,
5546 [NEON_2RM_VUZP] = 0x7,
5547 [NEON_2RM_VZIP] = 0x7,
5548 [NEON_2RM_VMOVN] = 0x7,
5549 [NEON_2RM_VQMOVN] = 0x7,
5550 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5551 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5552 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5553 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5554 [NEON_2RM_VRINTA] = 0x4,
5555 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5556 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5557 [NEON_2RM_VRINTM] = 0x4,
600b828c 5558 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5559 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5560 [NEON_2RM_VCVTAU] = 0x4,
5561 [NEON_2RM_VCVTAS] = 0x4,
5562 [NEON_2RM_VCVTNU] = 0x4,
5563 [NEON_2RM_VCVTNS] = 0x4,
5564 [NEON_2RM_VCVTPU] = 0x4,
5565 [NEON_2RM_VCVTPS] = 0x4,
5566 [NEON_2RM_VCVTMU] = 0x4,
5567 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5568 [NEON_2RM_VRECPE] = 0x4,
5569 [NEON_2RM_VRSQRTE] = 0x4,
5570 [NEON_2RM_VRECPE_F] = 0x4,
5571 [NEON_2RM_VRSQRTE_F] = 0x4,
5572 [NEON_2RM_VCVT_FS] = 0x4,
5573 [NEON_2RM_VCVT_FU] = 0x4,
5574 [NEON_2RM_VCVT_SF] = 0x4,
5575 [NEON_2RM_VCVT_UF] = 0x4,
5576};
5577
9ee6e8bb
PB
5578/* Translate a NEON data processing instruction. Return nonzero if the
5579 instruction is invalid.
ad69471c
PB
5580 We process data in a mixture of 32-bit and 64-bit chunks.
5581 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5582
7dcc1f89 5583static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5584{
5585 int op;
5586 int q;
5587 int rd, rn, rm;
5588 int size;
5589 int shift;
5590 int pass;
5591 int count;
5592 int pairwise;
5593 int u;
ca9a32e4 5594 uint32_t imm, mask;
39d5492a 5595 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5596 TCGv_i64 tmp64;
9ee6e8bb 5597
2c7ffc41
PM
5598 /* FIXME: this access check should not take precedence over UNDEF
5599 * for invalid encodings; we will generate incorrect syndrome information
5600 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5601 */
9dbbc748 5602 if (s->fp_excp_el) {
2c7ffc41 5603 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5604 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5605 return 0;
5606 }
5607
5df8bac1 5608 if (!s->vfp_enabled)
9ee6e8bb
PB
5609 return 1;
5610 q = (insn & (1 << 6)) != 0;
5611 u = (insn >> 24) & 1;
5612 VFP_DREG_D(rd, insn);
5613 VFP_DREG_N(rn, insn);
5614 VFP_DREG_M(rm, insn);
5615 size = (insn >> 20) & 3;
5616 if ((insn & (1 << 23)) == 0) {
5617 /* Three register same length. */
5618 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5619 /* Catch invalid op and bad size combinations: UNDEF */
5620 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5621 return 1;
5622 }
25f84f79
PM
5623 /* All insns of this form UNDEF for either this condition or the
5624 * superset of cases "Q==1"; we catch the latter later.
5625 */
5626 if (q && ((rd | rn | rm) & 1)) {
5627 return 1;
5628 }
f1ecb913
AB
5629 /*
5630 * The SHA-1/SHA-256 3-register instructions require special treatment
5631 * here, as their size field is overloaded as an op type selector, and
5632 * they all consume their input in a single pass.
5633 */
5634 if (op == NEON_3R_SHA) {
5635 if (!q) {
5636 return 1;
5637 }
5638 if (!u) { /* SHA-1 */
d614a513 5639 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5640 return 1;
5641 }
5642 tmp = tcg_const_i32(rd);
5643 tmp2 = tcg_const_i32(rn);
5644 tmp3 = tcg_const_i32(rm);
5645 tmp4 = tcg_const_i32(size);
5646 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5647 tcg_temp_free_i32(tmp4);
5648 } else { /* SHA-256 */
d614a513 5649 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5650 return 1;
5651 }
5652 tmp = tcg_const_i32(rd);
5653 tmp2 = tcg_const_i32(rn);
5654 tmp3 = tcg_const_i32(rm);
5655 switch (size) {
5656 case 0:
5657 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5658 break;
5659 case 1:
5660 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5661 break;
5662 case 2:
5663 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5664 break;
5665 }
5666 }
5667 tcg_temp_free_i32(tmp);
5668 tcg_temp_free_i32(tmp2);
5669 tcg_temp_free_i32(tmp3);
5670 return 0;
5671 }
62698be3
PM
5672 if (size == 3 && op != NEON_3R_LOGIC) {
5673 /* 64-bit element instructions. */
9ee6e8bb 5674 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5675 neon_load_reg64(cpu_V0, rn + pass);
5676 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5677 switch (op) {
62698be3 5678 case NEON_3R_VQADD:
9ee6e8bb 5679 if (u) {
02da0b2d
PM
5680 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5681 cpu_V0, cpu_V1);
2c0262af 5682 } else {
02da0b2d
PM
5683 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5684 cpu_V0, cpu_V1);
2c0262af 5685 }
9ee6e8bb 5686 break;
62698be3 5687 case NEON_3R_VQSUB:
9ee6e8bb 5688 if (u) {
02da0b2d
PM
5689 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5690 cpu_V0, cpu_V1);
ad69471c 5691 } else {
02da0b2d
PM
5692 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5693 cpu_V0, cpu_V1);
ad69471c
PB
5694 }
5695 break;
62698be3 5696 case NEON_3R_VSHL:
ad69471c
PB
5697 if (u) {
5698 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5699 } else {
5700 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5701 }
5702 break;
62698be3 5703 case NEON_3R_VQSHL:
ad69471c 5704 if (u) {
02da0b2d
PM
5705 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5706 cpu_V1, cpu_V0);
ad69471c 5707 } else {
02da0b2d
PM
5708 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5709 cpu_V1, cpu_V0);
ad69471c
PB
5710 }
5711 break;
62698be3 5712 case NEON_3R_VRSHL:
ad69471c
PB
5713 if (u) {
5714 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5715 } else {
ad69471c
PB
5716 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5717 }
5718 break;
62698be3 5719 case NEON_3R_VQRSHL:
ad69471c 5720 if (u) {
02da0b2d
PM
5721 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5722 cpu_V1, cpu_V0);
ad69471c 5723 } else {
02da0b2d
PM
5724 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5725 cpu_V1, cpu_V0);
1e8d4eec 5726 }
9ee6e8bb 5727 break;
62698be3 5728 case NEON_3R_VADD_VSUB:
9ee6e8bb 5729 if (u) {
ad69471c 5730 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5731 } else {
ad69471c 5732 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5733 }
5734 break;
5735 default:
5736 abort();
2c0262af 5737 }
ad69471c 5738 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5739 }
9ee6e8bb 5740 return 0;
2c0262af 5741 }
25f84f79 5742 pairwise = 0;
9ee6e8bb 5743 switch (op) {
62698be3
PM
5744 case NEON_3R_VSHL:
5745 case NEON_3R_VQSHL:
5746 case NEON_3R_VRSHL:
5747 case NEON_3R_VQRSHL:
9ee6e8bb 5748 {
ad69471c
PB
5749 int rtmp;
5750 /* Shift instruction operands are reversed. */
5751 rtmp = rn;
9ee6e8bb 5752 rn = rm;
ad69471c 5753 rm = rtmp;
9ee6e8bb 5754 }
2c0262af 5755 break;
25f84f79
PM
5756 case NEON_3R_VPADD:
5757 if (u) {
5758 return 1;
5759 }
5760 /* Fall through */
62698be3
PM
5761 case NEON_3R_VPMAX:
5762 case NEON_3R_VPMIN:
9ee6e8bb 5763 pairwise = 1;
2c0262af 5764 break;
25f84f79
PM
5765 case NEON_3R_FLOAT_ARITH:
5766 pairwise = (u && size < 2); /* if VPADD (float) */
5767 break;
5768 case NEON_3R_FLOAT_MINMAX:
5769 pairwise = u; /* if VPMIN/VPMAX (float) */
5770 break;
5771 case NEON_3R_FLOAT_CMP:
5772 if (!u && size) {
5773 /* no encoding for U=0 C=1x */
5774 return 1;
5775 }
5776 break;
5777 case NEON_3R_FLOAT_ACMP:
5778 if (!u) {
5779 return 1;
5780 }
5781 break;
505935fc
WN
5782 case NEON_3R_FLOAT_MISC:
5783 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5784 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5785 return 1;
5786 }
2c0262af 5787 break;
25f84f79
PM
5788 case NEON_3R_VMUL:
5789 if (u && (size != 0)) {
5790 /* UNDEF on invalid size for polynomial subcase */
5791 return 1;
5792 }
2c0262af 5793 break;
da97f52c 5794 case NEON_3R_VFM:
d614a513 5795 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5796 return 1;
5797 }
5798 break;
9ee6e8bb 5799 default:
2c0262af 5800 break;
9ee6e8bb 5801 }
dd8fbd78 5802
25f84f79
PM
5803 if (pairwise && q) {
5804 /* All the pairwise insns UNDEF if Q is set */
5805 return 1;
5806 }
5807
9ee6e8bb
PB
5808 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5809
5810 if (pairwise) {
5811 /* Pairwise. */
a5a14945
JR
5812 if (pass < 1) {
5813 tmp = neon_load_reg(rn, 0);
5814 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5815 } else {
a5a14945
JR
5816 tmp = neon_load_reg(rm, 0);
5817 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5818 }
5819 } else {
5820 /* Elementwise. */
dd8fbd78
FN
5821 tmp = neon_load_reg(rn, pass);
5822 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5823 }
5824 switch (op) {
62698be3 5825 case NEON_3R_VHADD:
9ee6e8bb
PB
5826 GEN_NEON_INTEGER_OP(hadd);
5827 break;
62698be3 5828 case NEON_3R_VQADD:
02da0b2d 5829 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5830 break;
62698be3 5831 case NEON_3R_VRHADD:
9ee6e8bb 5832 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5833 break;
62698be3 5834 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5835 switch ((u << 2) | size) {
5836 case 0: /* VAND */
dd8fbd78 5837 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5838 break;
5839 case 1: /* BIC */
f669df27 5840 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5841 break;
5842 case 2: /* VORR */
dd8fbd78 5843 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5844 break;
5845 case 3: /* VORN */
f669df27 5846 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5847 break;
5848 case 4: /* VEOR */
dd8fbd78 5849 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5850 break;
5851 case 5: /* VBSL */
dd8fbd78
FN
5852 tmp3 = neon_load_reg(rd, pass);
5853 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5854 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5855 break;
5856 case 6: /* VBIT */
dd8fbd78
FN
5857 tmp3 = neon_load_reg(rd, pass);
5858 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5859 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5860 break;
5861 case 7: /* VBIF */
dd8fbd78
FN
5862 tmp3 = neon_load_reg(rd, pass);
5863 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5864 tcg_temp_free_i32(tmp3);
9ee6e8bb 5865 break;
2c0262af
FB
5866 }
5867 break;
62698be3 5868 case NEON_3R_VHSUB:
9ee6e8bb
PB
5869 GEN_NEON_INTEGER_OP(hsub);
5870 break;
62698be3 5871 case NEON_3R_VQSUB:
02da0b2d 5872 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5873 break;
62698be3 5874 case NEON_3R_VCGT:
9ee6e8bb
PB
5875 GEN_NEON_INTEGER_OP(cgt);
5876 break;
62698be3 5877 case NEON_3R_VCGE:
9ee6e8bb
PB
5878 GEN_NEON_INTEGER_OP(cge);
5879 break;
62698be3 5880 case NEON_3R_VSHL:
ad69471c 5881 GEN_NEON_INTEGER_OP(shl);
2c0262af 5882 break;
62698be3 5883 case NEON_3R_VQSHL:
02da0b2d 5884 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5885 break;
62698be3 5886 case NEON_3R_VRSHL:
ad69471c 5887 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5888 break;
62698be3 5889 case NEON_3R_VQRSHL:
02da0b2d 5890 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5891 break;
62698be3 5892 case NEON_3R_VMAX:
9ee6e8bb
PB
5893 GEN_NEON_INTEGER_OP(max);
5894 break;
62698be3 5895 case NEON_3R_VMIN:
9ee6e8bb
PB
5896 GEN_NEON_INTEGER_OP(min);
5897 break;
62698be3 5898 case NEON_3R_VABD:
9ee6e8bb
PB
5899 GEN_NEON_INTEGER_OP(abd);
5900 break;
62698be3 5901 case NEON_3R_VABA:
9ee6e8bb 5902 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5903 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5904 tmp2 = neon_load_reg(rd, pass);
5905 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5906 break;
62698be3 5907 case NEON_3R_VADD_VSUB:
9ee6e8bb 5908 if (!u) { /* VADD */
62698be3 5909 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5910 } else { /* VSUB */
5911 switch (size) {
dd8fbd78
FN
5912 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5913 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5914 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5915 default: abort();
9ee6e8bb
PB
5916 }
5917 }
5918 break;
62698be3 5919 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5920 if (!u) { /* VTST */
5921 switch (size) {
dd8fbd78
FN
5922 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5923 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5924 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5925 default: abort();
9ee6e8bb
PB
5926 }
5927 } else { /* VCEQ */
5928 switch (size) {
dd8fbd78
FN
5929 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5930 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5931 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5932 default: abort();
9ee6e8bb
PB
5933 }
5934 }
5935 break;
62698be3 5936 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5937 switch (size) {
dd8fbd78
FN
5938 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5939 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5940 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5941 default: abort();
9ee6e8bb 5942 }
7d1b0095 5943 tcg_temp_free_i32(tmp2);
dd8fbd78 5944 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5945 if (u) { /* VMLS */
dd8fbd78 5946 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5947 } else { /* VMLA */
dd8fbd78 5948 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5949 }
5950 break;
62698be3 5951 case NEON_3R_VMUL:
9ee6e8bb 5952 if (u) { /* polynomial */
dd8fbd78 5953 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5954 } else { /* Integer */
5955 switch (size) {
dd8fbd78
FN
5956 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5957 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5958 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5959 default: abort();
9ee6e8bb
PB
5960 }
5961 }
5962 break;
62698be3 5963 case NEON_3R_VPMAX:
9ee6e8bb
PB
5964 GEN_NEON_INTEGER_OP(pmax);
5965 break;
62698be3 5966 case NEON_3R_VPMIN:
9ee6e8bb
PB
5967 GEN_NEON_INTEGER_OP(pmin);
5968 break;
62698be3 5969 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5970 if (!u) { /* VQDMULH */
5971 switch (size) {
02da0b2d
PM
5972 case 1:
5973 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5974 break;
5975 case 2:
5976 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5977 break;
62698be3 5978 default: abort();
9ee6e8bb 5979 }
62698be3 5980 } else { /* VQRDMULH */
9ee6e8bb 5981 switch (size) {
02da0b2d
PM
5982 case 1:
5983 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5984 break;
5985 case 2:
5986 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5987 break;
62698be3 5988 default: abort();
9ee6e8bb
PB
5989 }
5990 }
5991 break;
62698be3 5992 case NEON_3R_VPADD:
9ee6e8bb 5993 switch (size) {
dd8fbd78
FN
5994 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5995 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5996 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5997 default: abort();
9ee6e8bb
PB
5998 }
5999 break;
62698be3 6000 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6001 {
6002 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6003 switch ((u << 2) | size) {
6004 case 0: /* VADD */
aa47cfdd
PM
6005 case 4: /* VPADD */
6006 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6007 break;
6008 case 2: /* VSUB */
aa47cfdd 6009 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6010 break;
6011 case 6: /* VABD */
aa47cfdd 6012 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6013 break;
6014 default:
62698be3 6015 abort();
9ee6e8bb 6016 }
aa47cfdd 6017 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6018 break;
aa47cfdd 6019 }
62698be3 6020 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6021 {
6022 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6023 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6024 if (!u) {
7d1b0095 6025 tcg_temp_free_i32(tmp2);
dd8fbd78 6026 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6027 if (size == 0) {
aa47cfdd 6028 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6029 } else {
aa47cfdd 6030 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6031 }
6032 }
aa47cfdd 6033 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6034 break;
aa47cfdd 6035 }
62698be3 6036 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6037 {
6038 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6039 if (!u) {
aa47cfdd 6040 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6041 } else {
aa47cfdd
PM
6042 if (size == 0) {
6043 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6044 } else {
6045 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6046 }
b5ff1b31 6047 }
aa47cfdd 6048 tcg_temp_free_ptr(fpstatus);
2c0262af 6049 break;
aa47cfdd 6050 }
62698be3 6051 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6052 {
6053 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6054 if (size == 0) {
6055 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6056 } else {
6057 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6058 }
6059 tcg_temp_free_ptr(fpstatus);
2c0262af 6060 break;
aa47cfdd 6061 }
62698be3 6062 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6063 {
6064 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6065 if (size == 0) {
f71a2ae5 6066 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6067 } else {
f71a2ae5 6068 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6069 }
6070 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6071 break;
aa47cfdd 6072 }
505935fc
WN
6073 case NEON_3R_FLOAT_MISC:
6074 if (u) {
6075 /* VMAXNM/VMINNM */
6076 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6077 if (size == 0) {
f71a2ae5 6078 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6079 } else {
f71a2ae5 6080 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6081 }
6082 tcg_temp_free_ptr(fpstatus);
6083 } else {
6084 if (size == 0) {
6085 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6086 } else {
6087 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6088 }
6089 }
2c0262af 6090 break;
da97f52c
PM
6091 case NEON_3R_VFM:
6092 {
6093 /* VFMA, VFMS: fused multiply-add */
6094 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6095 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6096 if (size) {
6097 /* VFMS */
6098 gen_helper_vfp_negs(tmp, tmp);
6099 }
6100 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6101 tcg_temp_free_i32(tmp3);
6102 tcg_temp_free_ptr(fpstatus);
6103 break;
6104 }
9ee6e8bb
PB
6105 default:
6106 abort();
2c0262af 6107 }
7d1b0095 6108 tcg_temp_free_i32(tmp2);
dd8fbd78 6109
9ee6e8bb
PB
6110 /* Save the result. For elementwise operations we can put it
6111 straight into the destination register. For pairwise operations
6112 we have to be careful to avoid clobbering the source operands. */
6113 if (pairwise && rd == rm) {
dd8fbd78 6114 neon_store_scratch(pass, tmp);
9ee6e8bb 6115 } else {
dd8fbd78 6116 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6117 }
6118
6119 } /* for pass */
6120 if (pairwise && rd == rm) {
6121 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6122 tmp = neon_load_scratch(pass);
6123 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6124 }
6125 }
ad69471c 6126 /* End of 3 register same size operations. */
9ee6e8bb
PB
6127 } else if (insn & (1 << 4)) {
6128 if ((insn & 0x00380080) != 0) {
6129 /* Two registers and shift. */
6130 op = (insn >> 8) & 0xf;
6131 if (insn & (1 << 7)) {
cc13115b
PM
6132 /* 64-bit shift. */
6133 if (op > 7) {
6134 return 1;
6135 }
9ee6e8bb
PB
6136 size = 3;
6137 } else {
6138 size = 2;
6139 while ((insn & (1 << (size + 19))) == 0)
6140 size--;
6141 }
6142 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6143 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6144 by immediate using the variable shift operations. */
6145 if (op < 8) {
6146 /* Shift by immediate:
6147 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6148 if (q && ((rd | rm) & 1)) {
6149 return 1;
6150 }
6151 if (!u && (op == 4 || op == 6)) {
6152 return 1;
6153 }
9ee6e8bb
PB
6154 /* Right shifts are encoded as N - shift, where N is the
6155 element size in bits. */
6156 if (op <= 4)
6157 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6158 if (size == 3) {
6159 count = q + 1;
6160 } else {
6161 count = q ? 4: 2;
6162 }
6163 switch (size) {
6164 case 0:
6165 imm = (uint8_t) shift;
6166 imm |= imm << 8;
6167 imm |= imm << 16;
6168 break;
6169 case 1:
6170 imm = (uint16_t) shift;
6171 imm |= imm << 16;
6172 break;
6173 case 2:
6174 case 3:
6175 imm = shift;
6176 break;
6177 default:
6178 abort();
6179 }
6180
6181 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6182 if (size == 3) {
6183 neon_load_reg64(cpu_V0, rm + pass);
6184 tcg_gen_movi_i64(cpu_V1, imm);
6185 switch (op) {
6186 case 0: /* VSHR */
6187 case 1: /* VSRA */
6188 if (u)
6189 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6190 else
ad69471c 6191 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6192 break;
ad69471c
PB
6193 case 2: /* VRSHR */
6194 case 3: /* VRSRA */
6195 if (u)
6196 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6197 else
ad69471c 6198 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6199 break;
ad69471c 6200 case 4: /* VSRI */
ad69471c
PB
6201 case 5: /* VSHL, VSLI */
6202 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6203 break;
0322b26e 6204 case 6: /* VQSHLU */
02da0b2d
PM
6205 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6206 cpu_V0, cpu_V1);
ad69471c 6207 break;
0322b26e
PM
6208 case 7: /* VQSHL */
6209 if (u) {
02da0b2d 6210 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6211 cpu_V0, cpu_V1);
6212 } else {
02da0b2d 6213 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6214 cpu_V0, cpu_V1);
6215 }
9ee6e8bb 6216 break;
9ee6e8bb 6217 }
ad69471c
PB
6218 if (op == 1 || op == 3) {
6219 /* Accumulate. */
5371cb81 6220 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6221 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6222 } else if (op == 4 || (op == 5 && u)) {
6223 /* Insert */
923e6509
CL
6224 neon_load_reg64(cpu_V1, rd + pass);
6225 uint64_t mask;
6226 if (shift < -63 || shift > 63) {
6227 mask = 0;
6228 } else {
6229 if (op == 4) {
6230 mask = 0xffffffffffffffffull >> -shift;
6231 } else {
6232 mask = 0xffffffffffffffffull << shift;
6233 }
6234 }
6235 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6236 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6237 }
6238 neon_store_reg64(cpu_V0, rd + pass);
6239 } else { /* size < 3 */
6240 /* Operands in T0 and T1. */
dd8fbd78 6241 tmp = neon_load_reg(rm, pass);
7d1b0095 6242 tmp2 = tcg_temp_new_i32();
dd8fbd78 6243 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6244 switch (op) {
6245 case 0: /* VSHR */
6246 case 1: /* VSRA */
6247 GEN_NEON_INTEGER_OP(shl);
6248 break;
6249 case 2: /* VRSHR */
6250 case 3: /* VRSRA */
6251 GEN_NEON_INTEGER_OP(rshl);
6252 break;
6253 case 4: /* VSRI */
ad69471c
PB
6254 case 5: /* VSHL, VSLI */
6255 switch (size) {
dd8fbd78
FN
6256 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6257 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6258 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6259 default: abort();
ad69471c
PB
6260 }
6261 break;
0322b26e 6262 case 6: /* VQSHLU */
ad69471c 6263 switch (size) {
0322b26e 6264 case 0:
02da0b2d
PM
6265 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6266 tmp, tmp2);
0322b26e
PM
6267 break;
6268 case 1:
02da0b2d
PM
6269 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6270 tmp, tmp2);
0322b26e
PM
6271 break;
6272 case 2:
02da0b2d
PM
6273 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6274 tmp, tmp2);
0322b26e
PM
6275 break;
6276 default:
cc13115b 6277 abort();
ad69471c
PB
6278 }
6279 break;
0322b26e 6280 case 7: /* VQSHL */
02da0b2d 6281 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6282 break;
ad69471c 6283 }
7d1b0095 6284 tcg_temp_free_i32(tmp2);
ad69471c
PB
6285
6286 if (op == 1 || op == 3) {
6287 /* Accumulate. */
dd8fbd78 6288 tmp2 = neon_load_reg(rd, pass);
5371cb81 6289 gen_neon_add(size, tmp, tmp2);
7d1b0095 6290 tcg_temp_free_i32(tmp2);
ad69471c
PB
6291 } else if (op == 4 || (op == 5 && u)) {
6292 /* Insert */
6293 switch (size) {
6294 case 0:
6295 if (op == 4)
ca9a32e4 6296 mask = 0xff >> -shift;
ad69471c 6297 else
ca9a32e4
JR
6298 mask = (uint8_t)(0xff << shift);
6299 mask |= mask << 8;
6300 mask |= mask << 16;
ad69471c
PB
6301 break;
6302 case 1:
6303 if (op == 4)
ca9a32e4 6304 mask = 0xffff >> -shift;
ad69471c 6305 else
ca9a32e4
JR
6306 mask = (uint16_t)(0xffff << shift);
6307 mask |= mask << 16;
ad69471c
PB
6308 break;
6309 case 2:
ca9a32e4
JR
6310 if (shift < -31 || shift > 31) {
6311 mask = 0;
6312 } else {
6313 if (op == 4)
6314 mask = 0xffffffffu >> -shift;
6315 else
6316 mask = 0xffffffffu << shift;
6317 }
ad69471c
PB
6318 break;
6319 default:
6320 abort();
6321 }
dd8fbd78 6322 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6323 tcg_gen_andi_i32(tmp, tmp, mask);
6324 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6325 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6326 tcg_temp_free_i32(tmp2);
ad69471c 6327 }
dd8fbd78 6328 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6329 }
6330 } /* for pass */
6331 } else if (op < 10) {
ad69471c 6332 /* Shift by immediate and narrow:
9ee6e8bb 6333 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6334 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6335 if (rm & 1) {
6336 return 1;
6337 }
9ee6e8bb
PB
6338 shift = shift - (1 << (size + 3));
6339 size++;
92cdfaeb 6340 if (size == 3) {
a7812ae4 6341 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6342 neon_load_reg64(cpu_V0, rm);
6343 neon_load_reg64(cpu_V1, rm + 1);
6344 for (pass = 0; pass < 2; pass++) {
6345 TCGv_i64 in;
6346 if (pass == 0) {
6347 in = cpu_V0;
6348 } else {
6349 in = cpu_V1;
6350 }
ad69471c 6351 if (q) {
0b36f4cd 6352 if (input_unsigned) {
92cdfaeb 6353 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6354 } else {
92cdfaeb 6355 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6356 }
ad69471c 6357 } else {
0b36f4cd 6358 if (input_unsigned) {
92cdfaeb 6359 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6360 } else {
92cdfaeb 6361 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6362 }
ad69471c 6363 }
7d1b0095 6364 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6365 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6366 neon_store_reg(rd, pass, tmp);
6367 } /* for pass */
6368 tcg_temp_free_i64(tmp64);
6369 } else {
6370 if (size == 1) {
6371 imm = (uint16_t)shift;
6372 imm |= imm << 16;
2c0262af 6373 } else {
92cdfaeb
PM
6374 /* size == 2 */
6375 imm = (uint32_t)shift;
6376 }
6377 tmp2 = tcg_const_i32(imm);
6378 tmp4 = neon_load_reg(rm + 1, 0);
6379 tmp5 = neon_load_reg(rm + 1, 1);
6380 for (pass = 0; pass < 2; pass++) {
6381 if (pass == 0) {
6382 tmp = neon_load_reg(rm, 0);
6383 } else {
6384 tmp = tmp4;
6385 }
0b36f4cd
CL
6386 gen_neon_shift_narrow(size, tmp, tmp2, q,
6387 input_unsigned);
92cdfaeb
PM
6388 if (pass == 0) {
6389 tmp3 = neon_load_reg(rm, 1);
6390 } else {
6391 tmp3 = tmp5;
6392 }
0b36f4cd
CL
6393 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6394 input_unsigned);
36aa55dc 6395 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6396 tcg_temp_free_i32(tmp);
6397 tcg_temp_free_i32(tmp3);
6398 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6399 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6400 neon_store_reg(rd, pass, tmp);
6401 } /* for pass */
c6067f04 6402 tcg_temp_free_i32(tmp2);
b75263d6 6403 }
9ee6e8bb 6404 } else if (op == 10) {
cc13115b
PM
6405 /* VSHLL, VMOVL */
6406 if (q || (rd & 1)) {
9ee6e8bb 6407 return 1;
cc13115b 6408 }
ad69471c
PB
6409 tmp = neon_load_reg(rm, 0);
6410 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6411 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6412 if (pass == 1)
6413 tmp = tmp2;
6414
6415 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6416
9ee6e8bb
PB
6417 if (shift != 0) {
6418 /* The shift is less than the width of the source
ad69471c
PB
6419 type, so we can just shift the whole register. */
6420 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6421 /* Widen the result of shift: we need to clear
6422 * the potential overflow bits resulting from
6423 * left bits of the narrow input appearing as
6424 * right bits of left the neighbour narrow
6425 * input. */
ad69471c
PB
6426 if (size < 2 || !u) {
6427 uint64_t imm64;
6428 if (size == 0) {
6429 imm = (0xffu >> (8 - shift));
6430 imm |= imm << 16;
acdf01ef 6431 } else if (size == 1) {
ad69471c 6432 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6433 } else {
6434 /* size == 2 */
6435 imm = 0xffffffff >> (32 - shift);
6436 }
6437 if (size < 2) {
6438 imm64 = imm | (((uint64_t)imm) << 32);
6439 } else {
6440 imm64 = imm;
9ee6e8bb 6441 }
acdf01ef 6442 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6443 }
6444 }
ad69471c 6445 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6446 }
f73534a5 6447 } else if (op >= 14) {
9ee6e8bb 6448 /* VCVT fixed-point. */
cc13115b
PM
6449 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6450 return 1;
6451 }
f73534a5
PM
6452 /* We have already masked out the must-be-1 top bit of imm6,
6453 * hence this 32-shift where the ARM ARM has 64-imm6.
6454 */
6455 shift = 32 - shift;
9ee6e8bb 6456 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6457 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6458 if (!(op & 1)) {
9ee6e8bb 6459 if (u)
5500b06c 6460 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6461 else
5500b06c 6462 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6463 } else {
6464 if (u)
5500b06c 6465 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6466 else
5500b06c 6467 gen_vfp_tosl(0, shift, 1);
2c0262af 6468 }
4373f3ce 6469 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6470 }
6471 } else {
9ee6e8bb
PB
6472 return 1;
6473 }
6474 } else { /* (insn & 0x00380080) == 0 */
6475 int invert;
7d80fee5
PM
6476 if (q && (rd & 1)) {
6477 return 1;
6478 }
9ee6e8bb
PB
6479
6480 op = (insn >> 8) & 0xf;
6481 /* One register and immediate. */
6482 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6483 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6484 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6485 * We choose to not special-case this and will behave as if a
6486 * valid constant encoding of 0 had been given.
6487 */
9ee6e8bb
PB
6488 switch (op) {
6489 case 0: case 1:
6490 /* no-op */
6491 break;
6492 case 2: case 3:
6493 imm <<= 8;
6494 break;
6495 case 4: case 5:
6496 imm <<= 16;
6497 break;
6498 case 6: case 7:
6499 imm <<= 24;
6500 break;
6501 case 8: case 9:
6502 imm |= imm << 16;
6503 break;
6504 case 10: case 11:
6505 imm = (imm << 8) | (imm << 24);
6506 break;
6507 case 12:
8e31209e 6508 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6509 break;
6510 case 13:
6511 imm = (imm << 16) | 0xffff;
6512 break;
6513 case 14:
6514 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6515 if (invert)
6516 imm = ~imm;
6517 break;
6518 case 15:
7d80fee5
PM
6519 if (invert) {
6520 return 1;
6521 }
9ee6e8bb
PB
6522 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6523 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6524 break;
6525 }
6526 if (invert)
6527 imm = ~imm;
6528
9ee6e8bb
PB
6529 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6530 if (op & 1 && op < 12) {
ad69471c 6531 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6532 if (invert) {
6533 /* The immediate value has already been inverted, so
6534 BIC becomes AND. */
ad69471c 6535 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6536 } else {
ad69471c 6537 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6538 }
9ee6e8bb 6539 } else {
ad69471c 6540 /* VMOV, VMVN. */
7d1b0095 6541 tmp = tcg_temp_new_i32();
9ee6e8bb 6542 if (op == 14 && invert) {
a5a14945 6543 int n;
ad69471c
PB
6544 uint32_t val;
6545 val = 0;
9ee6e8bb
PB
6546 for (n = 0; n < 4; n++) {
6547 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6548 val |= 0xff << (n * 8);
9ee6e8bb 6549 }
ad69471c
PB
6550 tcg_gen_movi_i32(tmp, val);
6551 } else {
6552 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6553 }
9ee6e8bb 6554 }
ad69471c 6555 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6556 }
6557 }
e4b3861d 6558 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6559 if (size != 3) {
6560 op = (insn >> 8) & 0xf;
6561 if ((insn & (1 << 6)) == 0) {
6562 /* Three registers of different lengths. */
6563 int src1_wide;
6564 int src2_wide;
6565 int prewiden;
526d0096
PM
6566 /* undefreq: bit 0 : UNDEF if size == 0
6567 * bit 1 : UNDEF if size == 1
6568 * bit 2 : UNDEF if size == 2
6569 * bit 3 : UNDEF if U == 1
6570 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6571 */
6572 int undefreq;
6573 /* prewiden, src1_wide, src2_wide, undefreq */
6574 static const int neon_3reg_wide[16][4] = {
6575 {1, 0, 0, 0}, /* VADDL */
6576 {1, 1, 0, 0}, /* VADDW */
6577 {1, 0, 0, 0}, /* VSUBL */
6578 {1, 1, 0, 0}, /* VSUBW */
6579 {0, 1, 1, 0}, /* VADDHN */
6580 {0, 0, 0, 0}, /* VABAL */
6581 {0, 1, 1, 0}, /* VSUBHN */
6582 {0, 0, 0, 0}, /* VABDL */
6583 {0, 0, 0, 0}, /* VMLAL */
526d0096 6584 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6585 {0, 0, 0, 0}, /* VMLSL */
526d0096 6586 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6587 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6588 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6589 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6590 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6591 };
6592
6593 prewiden = neon_3reg_wide[op][0];
6594 src1_wide = neon_3reg_wide[op][1];
6595 src2_wide = neon_3reg_wide[op][2];
695272dc 6596 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6597
526d0096
PM
6598 if ((undefreq & (1 << size)) ||
6599 ((undefreq & 8) && u)) {
695272dc
PM
6600 return 1;
6601 }
6602 if ((src1_wide && (rn & 1)) ||
6603 (src2_wide && (rm & 1)) ||
6604 (!src2_wide && (rd & 1))) {
ad69471c 6605 return 1;
695272dc 6606 }
ad69471c 6607
4e624eda
PM
6608 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6609 * outside the loop below as it only performs a single pass.
6610 */
6611 if (op == 14 && size == 2) {
6612 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6613
d614a513 6614 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6615 return 1;
6616 }
6617 tcg_rn = tcg_temp_new_i64();
6618 tcg_rm = tcg_temp_new_i64();
6619 tcg_rd = tcg_temp_new_i64();
6620 neon_load_reg64(tcg_rn, rn);
6621 neon_load_reg64(tcg_rm, rm);
6622 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6623 neon_store_reg64(tcg_rd, rd);
6624 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6625 neon_store_reg64(tcg_rd, rd + 1);
6626 tcg_temp_free_i64(tcg_rn);
6627 tcg_temp_free_i64(tcg_rm);
6628 tcg_temp_free_i64(tcg_rd);
6629 return 0;
6630 }
6631
9ee6e8bb
PB
6632 /* Avoid overlapping operands. Wide source operands are
6633 always aligned so will never overlap with wide
6634 destinations in problematic ways. */
8f8e3aa4 6635 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6636 tmp = neon_load_reg(rm, 1);
6637 neon_store_scratch(2, tmp);
8f8e3aa4 6638 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6639 tmp = neon_load_reg(rn, 1);
6640 neon_store_scratch(2, tmp);
9ee6e8bb 6641 }
39d5492a 6642 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6643 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6644 if (src1_wide) {
6645 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6646 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6647 } else {
ad69471c 6648 if (pass == 1 && rd == rn) {
dd8fbd78 6649 tmp = neon_load_scratch(2);
9ee6e8bb 6650 } else {
ad69471c
PB
6651 tmp = neon_load_reg(rn, pass);
6652 }
6653 if (prewiden) {
6654 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6655 }
6656 }
ad69471c
PB
6657 if (src2_wide) {
6658 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6659 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6660 } else {
ad69471c 6661 if (pass == 1 && rd == rm) {
dd8fbd78 6662 tmp2 = neon_load_scratch(2);
9ee6e8bb 6663 } else {
ad69471c
PB
6664 tmp2 = neon_load_reg(rm, pass);
6665 }
6666 if (prewiden) {
6667 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6668 }
9ee6e8bb
PB
6669 }
6670 switch (op) {
6671 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6672 gen_neon_addl(size);
9ee6e8bb 6673 break;
79b0e534 6674 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6675 gen_neon_subl(size);
9ee6e8bb
PB
6676 break;
6677 case 5: case 7: /* VABAL, VABDL */
6678 switch ((size << 1) | u) {
ad69471c
PB
6679 case 0:
6680 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6681 break;
6682 case 1:
6683 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6684 break;
6685 case 2:
6686 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6687 break;
6688 case 3:
6689 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6690 break;
6691 case 4:
6692 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6693 break;
6694 case 5:
6695 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6696 break;
9ee6e8bb
PB
6697 default: abort();
6698 }
7d1b0095
PM
6699 tcg_temp_free_i32(tmp2);
6700 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6701 break;
6702 case 8: case 9: case 10: case 11: case 12: case 13:
6703 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6704 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6705 break;
6706 case 14: /* Polynomial VMULL */
e5ca24cb 6707 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6708 tcg_temp_free_i32(tmp2);
6709 tcg_temp_free_i32(tmp);
e5ca24cb 6710 break;
695272dc
PM
6711 default: /* 15 is RESERVED: caught earlier */
6712 abort();
9ee6e8bb 6713 }
ebcd88ce
PM
6714 if (op == 13) {
6715 /* VQDMULL */
6716 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6717 neon_store_reg64(cpu_V0, rd + pass);
6718 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6719 /* Accumulate. */
ebcd88ce 6720 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6721 switch (op) {
4dc064e6
PM
6722 case 10: /* VMLSL */
6723 gen_neon_negl(cpu_V0, size);
6724 /* Fall through */
6725 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6726 gen_neon_addl(size);
9ee6e8bb
PB
6727 break;
6728 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6729 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6730 if (op == 11) {
6731 gen_neon_negl(cpu_V0, size);
6732 }
ad69471c
PB
6733 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6734 break;
9ee6e8bb
PB
6735 default:
6736 abort();
6737 }
ad69471c 6738 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6739 } else if (op == 4 || op == 6) {
6740 /* Narrowing operation. */
7d1b0095 6741 tmp = tcg_temp_new_i32();
79b0e534 6742 if (!u) {
9ee6e8bb 6743 switch (size) {
ad69471c
PB
6744 case 0:
6745 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6746 break;
6747 case 1:
6748 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6749 break;
6750 case 2:
6751 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6752 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6753 break;
9ee6e8bb
PB
6754 default: abort();
6755 }
6756 } else {
6757 switch (size) {
ad69471c
PB
6758 case 0:
6759 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6760 break;
6761 case 1:
6762 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6763 break;
6764 case 2:
6765 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6766 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6767 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6768 break;
9ee6e8bb
PB
6769 default: abort();
6770 }
6771 }
ad69471c
PB
6772 if (pass == 0) {
6773 tmp3 = tmp;
6774 } else {
6775 neon_store_reg(rd, 0, tmp3);
6776 neon_store_reg(rd, 1, tmp);
6777 }
9ee6e8bb
PB
6778 } else {
6779 /* Write back the result. */
ad69471c 6780 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6781 }
6782 }
6783 } else {
3e3326df
PM
6784 /* Two registers and a scalar. NB that for ops of this form
6785 * the ARM ARM labels bit 24 as Q, but it is in our variable
6786 * 'u', not 'q'.
6787 */
6788 if (size == 0) {
6789 return 1;
6790 }
9ee6e8bb 6791 switch (op) {
9ee6e8bb 6792 case 1: /* Float VMLA scalar */
9ee6e8bb 6793 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6794 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6795 if (size == 1) {
6796 return 1;
6797 }
6798 /* fall through */
6799 case 0: /* Integer VMLA scalar */
6800 case 4: /* Integer VMLS scalar */
6801 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6802 case 12: /* VQDMULH scalar */
6803 case 13: /* VQRDMULH scalar */
3e3326df
PM
6804 if (u && ((rd | rn) & 1)) {
6805 return 1;
6806 }
dd8fbd78
FN
6807 tmp = neon_get_scalar(size, rm);
6808 neon_store_scratch(0, tmp);
9ee6e8bb 6809 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6810 tmp = neon_load_scratch(0);
6811 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6812 if (op == 12) {
6813 if (size == 1) {
02da0b2d 6814 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6815 } else {
02da0b2d 6816 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6817 }
6818 } else if (op == 13) {
6819 if (size == 1) {
02da0b2d 6820 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6821 } else {
02da0b2d 6822 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6823 }
6824 } else if (op & 1) {
aa47cfdd
PM
6825 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6826 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6827 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6828 } else {
6829 switch (size) {
dd8fbd78
FN
6830 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6831 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6832 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6833 default: abort();
9ee6e8bb
PB
6834 }
6835 }
7d1b0095 6836 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6837 if (op < 8) {
6838 /* Accumulate. */
dd8fbd78 6839 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6840 switch (op) {
6841 case 0:
dd8fbd78 6842 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6843 break;
6844 case 1:
aa47cfdd
PM
6845 {
6846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6847 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6848 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6849 break;
aa47cfdd 6850 }
9ee6e8bb 6851 case 4:
dd8fbd78 6852 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6853 break;
6854 case 5:
aa47cfdd
PM
6855 {
6856 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6857 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6858 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6859 break;
aa47cfdd 6860 }
9ee6e8bb
PB
6861 default:
6862 abort();
6863 }
7d1b0095 6864 tcg_temp_free_i32(tmp2);
9ee6e8bb 6865 }
dd8fbd78 6866 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6867 }
6868 break;
9ee6e8bb 6869 case 3: /* VQDMLAL scalar */
9ee6e8bb 6870 case 7: /* VQDMLSL scalar */
9ee6e8bb 6871 case 11: /* VQDMULL scalar */
3e3326df 6872 if (u == 1) {
ad69471c 6873 return 1;
3e3326df
PM
6874 }
6875 /* fall through */
6876 case 2: /* VMLAL scalar */
6877 case 6: /* VMLSL scalar */
6878 case 10: /* VMULL scalar */
6879 if (rd & 1) {
6880 return 1;
6881 }
dd8fbd78 6882 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6883 /* We need a copy of tmp2 because gen_neon_mull
6884 * deletes it during pass 0. */
7d1b0095 6885 tmp4 = tcg_temp_new_i32();
c6067f04 6886 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6887 tmp3 = neon_load_reg(rn, 1);
ad69471c 6888
9ee6e8bb 6889 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6890 if (pass == 0) {
6891 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6892 } else {
dd8fbd78 6893 tmp = tmp3;
c6067f04 6894 tmp2 = tmp4;
9ee6e8bb 6895 }
ad69471c 6896 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6897 if (op != 11) {
6898 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6899 }
9ee6e8bb 6900 switch (op) {
4dc064e6
PM
6901 case 6:
6902 gen_neon_negl(cpu_V0, size);
6903 /* Fall through */
6904 case 2:
ad69471c 6905 gen_neon_addl(size);
9ee6e8bb
PB
6906 break;
6907 case 3: case 7:
ad69471c 6908 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6909 if (op == 7) {
6910 gen_neon_negl(cpu_V0, size);
6911 }
ad69471c 6912 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6913 break;
6914 case 10:
6915 /* no-op */
6916 break;
6917 case 11:
ad69471c 6918 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6919 break;
6920 default:
6921 abort();
6922 }
ad69471c 6923 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6924 }
dd8fbd78 6925
dd8fbd78 6926
9ee6e8bb
PB
6927 break;
6928 default: /* 14 and 15 are RESERVED */
6929 return 1;
6930 }
6931 }
6932 } else { /* size == 3 */
6933 if (!u) {
6934 /* Extract. */
9ee6e8bb 6935 imm = (insn >> 8) & 0xf;
ad69471c
PB
6936
6937 if (imm > 7 && !q)
6938 return 1;
6939
52579ea1
PM
6940 if (q && ((rd | rn | rm) & 1)) {
6941 return 1;
6942 }
6943
ad69471c
PB
6944 if (imm == 0) {
6945 neon_load_reg64(cpu_V0, rn);
6946 if (q) {
6947 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6948 }
ad69471c
PB
6949 } else if (imm == 8) {
6950 neon_load_reg64(cpu_V0, rn + 1);
6951 if (q) {
6952 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6953 }
ad69471c 6954 } else if (q) {
a7812ae4 6955 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6956 if (imm < 8) {
6957 neon_load_reg64(cpu_V0, rn);
a7812ae4 6958 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6959 } else {
6960 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6961 neon_load_reg64(tmp64, rm);
ad69471c
PB
6962 }
6963 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6964 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6965 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6966 if (imm < 8) {
6967 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6968 } else {
ad69471c
PB
6969 neon_load_reg64(cpu_V1, rm + 1);
6970 imm -= 8;
9ee6e8bb 6971 }
ad69471c 6972 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6973 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6974 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6975 tcg_temp_free_i64(tmp64);
ad69471c 6976 } else {
a7812ae4 6977 /* BUGFIX */
ad69471c 6978 neon_load_reg64(cpu_V0, rn);
a7812ae4 6979 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6980 neon_load_reg64(cpu_V1, rm);
a7812ae4 6981 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6982 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6983 }
6984 neon_store_reg64(cpu_V0, rd);
6985 if (q) {
6986 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6987 }
6988 } else if ((insn & (1 << 11)) == 0) {
6989 /* Two register misc. */
6990 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6991 size = (insn >> 18) & 3;
600b828c
PM
6992 /* UNDEF for unknown op values and bad op-size combinations */
6993 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6994 return 1;
6995 }
fe8fcf3d
PM
6996 if (neon_2rm_is_v8_op(op) &&
6997 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6998 return 1;
6999 }
fc2a9b37
PM
7000 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7001 q && ((rm | rd) & 1)) {
7002 return 1;
7003 }
9ee6e8bb 7004 switch (op) {
600b828c 7005 case NEON_2RM_VREV64:
9ee6e8bb 7006 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7007 tmp = neon_load_reg(rm, pass * 2);
7008 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7009 switch (size) {
dd8fbd78
FN
7010 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7011 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7012 case 2: /* no-op */ break;
7013 default: abort();
7014 }
dd8fbd78 7015 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7016 if (size == 2) {
dd8fbd78 7017 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7018 } else {
9ee6e8bb 7019 switch (size) {
dd8fbd78
FN
7020 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7021 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7022 default: abort();
7023 }
dd8fbd78 7024 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7025 }
7026 }
7027 break;
600b828c
PM
7028 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7029 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7030 for (pass = 0; pass < q + 1; pass++) {
7031 tmp = neon_load_reg(rm, pass * 2);
7032 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7033 tmp = neon_load_reg(rm, pass * 2 + 1);
7034 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7035 switch (size) {
7036 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7037 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7038 case 2: tcg_gen_add_i64(CPU_V001); break;
7039 default: abort();
7040 }
600b828c 7041 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7042 /* Accumulate. */
ad69471c
PB
7043 neon_load_reg64(cpu_V1, rd + pass);
7044 gen_neon_addl(size);
9ee6e8bb 7045 }
ad69471c 7046 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7047 }
7048 break;
600b828c 7049 case NEON_2RM_VTRN:
9ee6e8bb 7050 if (size == 2) {
a5a14945 7051 int n;
9ee6e8bb 7052 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7053 tmp = neon_load_reg(rm, n);
7054 tmp2 = neon_load_reg(rd, n + 1);
7055 neon_store_reg(rm, n, tmp2);
7056 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7057 }
7058 } else {
7059 goto elementwise;
7060 }
7061 break;
600b828c 7062 case NEON_2RM_VUZP:
02acedf9 7063 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7064 return 1;
9ee6e8bb
PB
7065 }
7066 break;
600b828c 7067 case NEON_2RM_VZIP:
d68a6f3a 7068 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7069 return 1;
9ee6e8bb
PB
7070 }
7071 break;
600b828c
PM
7072 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7073 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7074 if (rm & 1) {
7075 return 1;
7076 }
39d5492a 7077 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7078 for (pass = 0; pass < 2; pass++) {
ad69471c 7079 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7080 tmp = tcg_temp_new_i32();
600b828c
PM
7081 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7082 tmp, cpu_V0);
ad69471c
PB
7083 if (pass == 0) {
7084 tmp2 = tmp;
7085 } else {
7086 neon_store_reg(rd, 0, tmp2);
7087 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7088 }
9ee6e8bb
PB
7089 }
7090 break;
600b828c 7091 case NEON_2RM_VSHLL:
fc2a9b37 7092 if (q || (rd & 1)) {
9ee6e8bb 7093 return 1;
600b828c 7094 }
ad69471c
PB
7095 tmp = neon_load_reg(rm, 0);
7096 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7097 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7098 if (pass == 1)
7099 tmp = tmp2;
7100 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7101 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7102 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7103 }
7104 break;
600b828c 7105 case NEON_2RM_VCVT_F16_F32:
d614a513 7106 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7107 q || (rm & 1)) {
7108 return 1;
7109 }
7d1b0095
PM
7110 tmp = tcg_temp_new_i32();
7111 tmp2 = tcg_temp_new_i32();
60011498 7112 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7113 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7114 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7115 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7116 tcg_gen_shli_i32(tmp2, tmp2, 16);
7117 tcg_gen_or_i32(tmp2, tmp2, tmp);
7118 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7119 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7120 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7121 neon_store_reg(rd, 0, tmp2);
7d1b0095 7122 tmp2 = tcg_temp_new_i32();
2d981da7 7123 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7124 tcg_gen_shli_i32(tmp2, tmp2, 16);
7125 tcg_gen_or_i32(tmp2, tmp2, tmp);
7126 neon_store_reg(rd, 1, tmp2);
7d1b0095 7127 tcg_temp_free_i32(tmp);
60011498 7128 break;
600b828c 7129 case NEON_2RM_VCVT_F32_F16:
d614a513 7130 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7131 q || (rd & 1)) {
7132 return 1;
7133 }
7d1b0095 7134 tmp3 = tcg_temp_new_i32();
60011498
PB
7135 tmp = neon_load_reg(rm, 0);
7136 tmp2 = neon_load_reg(rm, 1);
7137 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7138 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7139 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7140 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7141 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7142 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7143 tcg_temp_free_i32(tmp);
60011498 7144 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7145 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7146 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7147 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7148 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7149 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7150 tcg_temp_free_i32(tmp2);
7151 tcg_temp_free_i32(tmp3);
60011498 7152 break;
9d935509 7153 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7154 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7155 || ((rm | rd) & 1)) {
7156 return 1;
7157 }
7158 tmp = tcg_const_i32(rd);
7159 tmp2 = tcg_const_i32(rm);
7160
7161 /* Bit 6 is the lowest opcode bit; it distinguishes between
7162 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7163 */
7164 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7165
7166 if (op == NEON_2RM_AESE) {
7167 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7168 } else {
7169 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7170 }
7171 tcg_temp_free_i32(tmp);
7172 tcg_temp_free_i32(tmp2);
7173 tcg_temp_free_i32(tmp3);
7174 break;
f1ecb913 7175 case NEON_2RM_SHA1H:
d614a513 7176 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7177 || ((rm | rd) & 1)) {
7178 return 1;
7179 }
7180 tmp = tcg_const_i32(rd);
7181 tmp2 = tcg_const_i32(rm);
7182
7183 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7184
7185 tcg_temp_free_i32(tmp);
7186 tcg_temp_free_i32(tmp2);
7187 break;
7188 case NEON_2RM_SHA1SU1:
7189 if ((rm | rd) & 1) {
7190 return 1;
7191 }
7192 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7193 if (q) {
d614a513 7194 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7195 return 1;
7196 }
d614a513 7197 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7198 return 1;
7199 }
7200 tmp = tcg_const_i32(rd);
7201 tmp2 = tcg_const_i32(rm);
7202 if (q) {
7203 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7204 } else {
7205 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7206 }
7207 tcg_temp_free_i32(tmp);
7208 tcg_temp_free_i32(tmp2);
7209 break;
9ee6e8bb
PB
7210 default:
7211 elementwise:
7212 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7213 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7214 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7215 neon_reg_offset(rm, pass));
39d5492a 7216 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7217 } else {
dd8fbd78 7218 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7219 }
7220 switch (op) {
600b828c 7221 case NEON_2RM_VREV32:
9ee6e8bb 7222 switch (size) {
dd8fbd78
FN
7223 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7224 case 1: gen_swap_half(tmp); break;
600b828c 7225 default: abort();
9ee6e8bb
PB
7226 }
7227 break;
600b828c 7228 case NEON_2RM_VREV16:
dd8fbd78 7229 gen_rev16(tmp);
9ee6e8bb 7230 break;
600b828c 7231 case NEON_2RM_VCLS:
9ee6e8bb 7232 switch (size) {
dd8fbd78
FN
7233 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7234 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7235 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7236 default: abort();
9ee6e8bb
PB
7237 }
7238 break;
600b828c 7239 case NEON_2RM_VCLZ:
9ee6e8bb 7240 switch (size) {
dd8fbd78
FN
7241 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7242 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7243 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7244 default: abort();
9ee6e8bb
PB
7245 }
7246 break;
600b828c 7247 case NEON_2RM_VCNT:
dd8fbd78 7248 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7249 break;
600b828c 7250 case NEON_2RM_VMVN:
dd8fbd78 7251 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7252 break;
600b828c 7253 case NEON_2RM_VQABS:
9ee6e8bb 7254 switch (size) {
02da0b2d
PM
7255 case 0:
7256 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7257 break;
7258 case 1:
7259 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7260 break;
7261 case 2:
7262 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7263 break;
600b828c 7264 default: abort();
9ee6e8bb
PB
7265 }
7266 break;
600b828c 7267 case NEON_2RM_VQNEG:
9ee6e8bb 7268 switch (size) {
02da0b2d
PM
7269 case 0:
7270 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7271 break;
7272 case 1:
7273 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7274 break;
7275 case 2:
7276 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7277 break;
600b828c 7278 default: abort();
9ee6e8bb
PB
7279 }
7280 break;
600b828c 7281 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7282 tmp2 = tcg_const_i32(0);
9ee6e8bb 7283 switch(size) {
dd8fbd78
FN
7284 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7285 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7286 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7287 default: abort();
9ee6e8bb 7288 }
39d5492a 7289 tcg_temp_free_i32(tmp2);
600b828c 7290 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7291 tcg_gen_not_i32(tmp, tmp);
600b828c 7292 }
9ee6e8bb 7293 break;
600b828c 7294 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7295 tmp2 = tcg_const_i32(0);
9ee6e8bb 7296 switch(size) {
dd8fbd78
FN
7297 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7298 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7299 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7300 default: abort();
9ee6e8bb 7301 }
39d5492a 7302 tcg_temp_free_i32(tmp2);
600b828c 7303 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7304 tcg_gen_not_i32(tmp, tmp);
600b828c 7305 }
9ee6e8bb 7306 break;
600b828c 7307 case NEON_2RM_VCEQ0:
dd8fbd78 7308 tmp2 = tcg_const_i32(0);
9ee6e8bb 7309 switch(size) {
dd8fbd78
FN
7310 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7311 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7312 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7313 default: abort();
9ee6e8bb 7314 }
39d5492a 7315 tcg_temp_free_i32(tmp2);
9ee6e8bb 7316 break;
600b828c 7317 case NEON_2RM_VABS:
9ee6e8bb 7318 switch(size) {
dd8fbd78
FN
7319 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7320 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7321 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7322 default: abort();
9ee6e8bb
PB
7323 }
7324 break;
600b828c 7325 case NEON_2RM_VNEG:
dd8fbd78
FN
7326 tmp2 = tcg_const_i32(0);
7327 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7328 tcg_temp_free_i32(tmp2);
9ee6e8bb 7329 break;
600b828c 7330 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7331 {
7332 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7333 tmp2 = tcg_const_i32(0);
aa47cfdd 7334 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7335 tcg_temp_free_i32(tmp2);
aa47cfdd 7336 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7337 break;
aa47cfdd 7338 }
600b828c 7339 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7340 {
7341 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7342 tmp2 = tcg_const_i32(0);
aa47cfdd 7343 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7344 tcg_temp_free_i32(tmp2);
aa47cfdd 7345 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7346 break;
aa47cfdd 7347 }
600b828c 7348 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7349 {
7350 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7351 tmp2 = tcg_const_i32(0);
aa47cfdd 7352 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7353 tcg_temp_free_i32(tmp2);
aa47cfdd 7354 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7355 break;
aa47cfdd 7356 }
600b828c 7357 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7358 {
7359 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7360 tmp2 = tcg_const_i32(0);
aa47cfdd 7361 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7362 tcg_temp_free_i32(tmp2);
aa47cfdd 7363 tcg_temp_free_ptr(fpstatus);
0e326109 7364 break;
aa47cfdd 7365 }
600b828c 7366 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7367 {
7368 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7369 tmp2 = tcg_const_i32(0);
aa47cfdd 7370 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7371 tcg_temp_free_i32(tmp2);
aa47cfdd 7372 tcg_temp_free_ptr(fpstatus);
0e326109 7373 break;
aa47cfdd 7374 }
600b828c 7375 case NEON_2RM_VABS_F:
4373f3ce 7376 gen_vfp_abs(0);
9ee6e8bb 7377 break;
600b828c 7378 case NEON_2RM_VNEG_F:
4373f3ce 7379 gen_vfp_neg(0);
9ee6e8bb 7380 break;
600b828c 7381 case NEON_2RM_VSWP:
dd8fbd78
FN
7382 tmp2 = neon_load_reg(rd, pass);
7383 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7384 break;
600b828c 7385 case NEON_2RM_VTRN:
dd8fbd78 7386 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7387 switch (size) {
dd8fbd78
FN
7388 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7389 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7390 default: abort();
9ee6e8bb 7391 }
dd8fbd78 7392 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7393 break;
34f7b0a2
WN
7394 case NEON_2RM_VRINTN:
7395 case NEON_2RM_VRINTA:
7396 case NEON_2RM_VRINTM:
7397 case NEON_2RM_VRINTP:
7398 case NEON_2RM_VRINTZ:
7399 {
7400 TCGv_i32 tcg_rmode;
7401 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7402 int rmode;
7403
7404 if (op == NEON_2RM_VRINTZ) {
7405 rmode = FPROUNDING_ZERO;
7406 } else {
7407 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7408 }
7409
7410 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7411 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7412 cpu_env);
7413 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7414 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7415 cpu_env);
7416 tcg_temp_free_ptr(fpstatus);
7417 tcg_temp_free_i32(tcg_rmode);
7418 break;
7419 }
2ce70625
WN
7420 case NEON_2RM_VRINTX:
7421 {
7422 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7423 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7424 tcg_temp_free_ptr(fpstatus);
7425 break;
7426 }
901ad525
WN
7427 case NEON_2RM_VCVTAU:
7428 case NEON_2RM_VCVTAS:
7429 case NEON_2RM_VCVTNU:
7430 case NEON_2RM_VCVTNS:
7431 case NEON_2RM_VCVTPU:
7432 case NEON_2RM_VCVTPS:
7433 case NEON_2RM_VCVTMU:
7434 case NEON_2RM_VCVTMS:
7435 {
7436 bool is_signed = !extract32(insn, 7, 1);
7437 TCGv_ptr fpst = get_fpstatus_ptr(1);
7438 TCGv_i32 tcg_rmode, tcg_shift;
7439 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7440
7441 tcg_shift = tcg_const_i32(0);
7442 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7443 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7444 cpu_env);
7445
7446 if (is_signed) {
7447 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7448 tcg_shift, fpst);
7449 } else {
7450 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7451 tcg_shift, fpst);
7452 }
7453
7454 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7455 cpu_env);
7456 tcg_temp_free_i32(tcg_rmode);
7457 tcg_temp_free_i32(tcg_shift);
7458 tcg_temp_free_ptr(fpst);
7459 break;
7460 }
600b828c 7461 case NEON_2RM_VRECPE:
b6d4443a
AB
7462 {
7463 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7464 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7465 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7466 break;
b6d4443a 7467 }
600b828c 7468 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7469 {
7470 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7471 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7472 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7473 break;
c2fb418e 7474 }
600b828c 7475 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7476 {
7477 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7478 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7479 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7480 break;
b6d4443a 7481 }
600b828c 7482 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7483 {
7484 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7485 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7486 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7487 break;
c2fb418e 7488 }
600b828c 7489 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7490 gen_vfp_sito(0, 1);
9ee6e8bb 7491 break;
600b828c 7492 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7493 gen_vfp_uito(0, 1);
9ee6e8bb 7494 break;
600b828c 7495 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7496 gen_vfp_tosiz(0, 1);
9ee6e8bb 7497 break;
600b828c 7498 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7499 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7500 break;
7501 default:
600b828c
PM
7502 /* Reserved op values were caught by the
7503 * neon_2rm_sizes[] check earlier.
7504 */
7505 abort();
9ee6e8bb 7506 }
600b828c 7507 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7508 tcg_gen_st_f32(cpu_F0s, cpu_env,
7509 neon_reg_offset(rd, pass));
9ee6e8bb 7510 } else {
dd8fbd78 7511 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7512 }
7513 }
7514 break;
7515 }
7516 } else if ((insn & (1 << 10)) == 0) {
7517 /* VTBL, VTBX. */
56907d77
PM
7518 int n = ((insn >> 8) & 3) + 1;
7519 if ((rn + n) > 32) {
7520 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7521 * helper function running off the end of the register file.
7522 */
7523 return 1;
7524 }
7525 n <<= 3;
9ee6e8bb 7526 if (insn & (1 << 6)) {
8f8e3aa4 7527 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7528 } else {
7d1b0095 7529 tmp = tcg_temp_new_i32();
8f8e3aa4 7530 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7531 }
8f8e3aa4 7532 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7533 tmp4 = tcg_const_i32(rn);
7534 tmp5 = tcg_const_i32(n);
9ef39277 7535 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7536 tcg_temp_free_i32(tmp);
9ee6e8bb 7537 if (insn & (1 << 6)) {
8f8e3aa4 7538 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7539 } else {
7d1b0095 7540 tmp = tcg_temp_new_i32();
8f8e3aa4 7541 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7542 }
8f8e3aa4 7543 tmp3 = neon_load_reg(rm, 1);
9ef39277 7544 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7545 tcg_temp_free_i32(tmp5);
7546 tcg_temp_free_i32(tmp4);
8f8e3aa4 7547 neon_store_reg(rd, 0, tmp2);
3018f259 7548 neon_store_reg(rd, 1, tmp3);
7d1b0095 7549 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7550 } else if ((insn & 0x380) == 0) {
7551 /* VDUP */
133da6aa
JR
7552 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7553 return 1;
7554 }
9ee6e8bb 7555 if (insn & (1 << 19)) {
dd8fbd78 7556 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7557 } else {
dd8fbd78 7558 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7559 }
7560 if (insn & (1 << 16)) {
dd8fbd78 7561 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7562 } else if (insn & (1 << 17)) {
7563 if ((insn >> 18) & 1)
dd8fbd78 7564 gen_neon_dup_high16(tmp);
9ee6e8bb 7565 else
dd8fbd78 7566 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7567 }
7568 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7569 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7570 tcg_gen_mov_i32(tmp2, tmp);
7571 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7572 }
7d1b0095 7573 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7574 } else {
7575 return 1;
7576 }
7577 }
7578 }
7579 return 0;
7580}
7581
/* Decode and emit code for a coprocessor instruction (MRC/MCR/MRRC/MCRR).
 *
 * @s: disassembly context for the instruction being translated
 * @insn: the 32-bit instruction word
 *
 * Returns 0 if the instruction was handled, 1 to signal UNDEF.
 *
 * XScale/iwMMXt instructions (cp0/cp1) are dispatched to their own
 * decoders; everything else is treated as a generic system register
 * access looked up in the cpreg hashtable.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates access to cp0/cp1 on XScale */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-identifying fields; 64-bit (MRRC/MCRR)
     * accesses have no crn/opc2 and carry the second transfer register
     * in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* The runtime check may raise an exception, so the guest
             * state (condexec bits and PC) must be up to date first.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers executed under icount must be bracketed by
         * gen_io_start()/gen_io_end() so the access is counted.
         */
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high) */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7833
5e3f878a
PB
7834
7835/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7836static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7837{
39d5492a 7838 TCGv_i32 tmp;
7d1b0095 7839 tmp = tcg_temp_new_i32();
ecc7b3aa 7840 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7841 store_reg(s, rlow, tmp);
7d1b0095 7842 tmp = tcg_temp_new_i32();
5e3f878a 7843 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7844 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7845 store_reg(s, rhigh, tmp);
7846}
7847
7848/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7849static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7850{
a7812ae4 7851 TCGv_i64 tmp;
39d5492a 7852 TCGv_i32 tmp2;
5e3f878a 7853
36aa55dc 7854 /* Load value and extend to 64 bits. */
a7812ae4 7855 tmp = tcg_temp_new_i64();
5e3f878a
PB
7856 tmp2 = load_reg(s, rlow);
7857 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7858 tcg_temp_free_i32(tmp2);
5e3f878a 7859 tcg_gen_add_i64(val, val, tmp);
b75263d6 7860 tcg_temp_free_i64(tmp);
5e3f878a
PB
7861}
7862
7863/* load and add a 64-bit value from a register pair. */
a7812ae4 7864static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7865{
a7812ae4 7866 TCGv_i64 tmp;
39d5492a
PM
7867 TCGv_i32 tmpl;
7868 TCGv_i32 tmph;
5e3f878a
PB
7869
7870 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7871 tmpl = load_reg(s, rlow);
7872 tmph = load_reg(s, rhigh);
a7812ae4 7873 tmp = tcg_temp_new_i64();
36aa55dc 7874 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7875 tcg_temp_free_i32(tmpl);
7876 tcg_temp_free_i32(tmph);
5e3f878a 7877 tcg_gen_add_i64(val, val, tmp);
b75263d6 7878 tcg_temp_free_i64(tmp);
5e3f878a
PB
7879}
7880
c9f10124 7881/* Set N and Z flags from hi|lo. */
39d5492a 7882static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7883{
c9f10124
RH
7884 tcg_gen_mov_i32(cpu_NF, hi);
7885 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7886}
7887
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */

/* Emit code for LDREX/LDREXB/LDREXH/LDREXD.
 *
 * @rt: first destination register
 * @rt2: second destination register (LDREXD only, i.e. size == 3)
 * @addr: TCG value holding the guest virtual address
 * @size: log2 of the access size (3 means a 64-bit pair load)
 *
 * Loads the value(s) and records the address and loaded value in
 * cpu_exclusive_addr/cpu_exclusive_val for the later store-exclusive.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        /* Remember the full 64-bit value for the store-exclusive compare */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Mark the monitor as armed for this address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7938
/* Emit code for CLREX: clear the local exclusive monitor. */
static void gen_clrex(DisasContext *s)
{
    /* -1 is never a valid exclusive address, so this disarms the monitor */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7943
/* Emit code for STREX/STREXB/STREXH/STREXD.
 *
 * @rd: status destination register (0 on success, 1 on failure)
 * @rt: first source register
 * @rt2: second source register (STREXD only, i.e. size == 3)
 * @addr: TCG value holding the guest virtual address
 * @size: log2 of the access size (3 means a 64-bit pair store)
 *
 * The address is compared manually against cpu_exclusive_addr; the
 * value comparison against cpu_exclusive_val is folded into the
 * cmpxchg itself.  On either path the monitor is disarmed at the end.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    /* Fail immediately if the monitor is armed for a different address */
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* o64 receives the value found in memory; the store only
         * happens if it still equals cpu_exclusive_val.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* Status is 1 iff the cmpxchg comparison failed */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now disarmed */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 8017
81465888
PM
8018/* gen_srs:
8019 * @env: CPUARMState
8020 * @s: DisasContext
8021 * @mode: mode field from insn (which stack to store to)
8022 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8023 * @writeback: true if writeback bit set
8024 *
8025 * Generate code for the SRS (Store Return State) insn.
8026 */
8027static void gen_srs(DisasContext *s,
8028 uint32_t mode, uint32_t amode, bool writeback)
8029{
8030 int32_t offset;
cbc0326b
PM
8031 TCGv_i32 addr, tmp;
8032 bool undef = false;
8033
8034 /* SRS is:
8035 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8036 * and specified mode is monitor mode
cbc0326b
PM
8037 * - UNDEFINED in Hyp mode
8038 * - UNPREDICTABLE in User or System mode
8039 * - UNPREDICTABLE if the specified mode is:
8040 * -- not implemented
8041 * -- not a valid mode number
8042 * -- a mode that's at a higher exception level
8043 * -- Monitor, if we are Non-secure
f01377f5 8044 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8045 */
ba63cf47 8046 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8047 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8048 return;
8049 }
8050
8051 if (s->current_el == 0 || s->current_el == 2) {
8052 undef = true;
8053 }
8054
8055 switch (mode) {
8056 case ARM_CPU_MODE_USR:
8057 case ARM_CPU_MODE_FIQ:
8058 case ARM_CPU_MODE_IRQ:
8059 case ARM_CPU_MODE_SVC:
8060 case ARM_CPU_MODE_ABT:
8061 case ARM_CPU_MODE_UND:
8062 case ARM_CPU_MODE_SYS:
8063 break;
8064 case ARM_CPU_MODE_HYP:
8065 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8066 undef = true;
8067 }
8068 break;
8069 case ARM_CPU_MODE_MON:
8070 /* No need to check specifically for "are we non-secure" because
8071 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8072 * so if this isn't EL3 then we must be non-secure.
8073 */
8074 if (s->current_el != 3) {
8075 undef = true;
8076 }
8077 break;
8078 default:
8079 undef = true;
8080 }
8081
8082 if (undef) {
8083 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8084 default_exception_el(s));
8085 return;
8086 }
8087
8088 addr = tcg_temp_new_i32();
8089 tmp = tcg_const_i32(mode);
f01377f5
PM
8090 /* get_r13_banked() will raise an exception if called from System mode */
8091 gen_set_condexec(s);
8092 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8093 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8094 tcg_temp_free_i32(tmp);
8095 switch (amode) {
8096 case 0: /* DA */
8097 offset = -4;
8098 break;
8099 case 1: /* IA */
8100 offset = 0;
8101 break;
8102 case 2: /* DB */
8103 offset = -8;
8104 break;
8105 case 3: /* IB */
8106 offset = 4;
8107 break;
8108 default:
8109 abort();
8110 }
8111 tcg_gen_addi_i32(addr, addr, offset);
8112 tmp = load_reg(s, 14);
12dcc321 8113 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8114 tcg_temp_free_i32(tmp);
81465888
PM
8115 tmp = load_cpu_field(spsr);
8116 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8117 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8118 tcg_temp_free_i32(tmp);
81465888
PM
8119 if (writeback) {
8120 switch (amode) {
8121 case 0:
8122 offset = -8;
8123 break;
8124 case 1:
8125 offset = 4;
8126 break;
8127 case 2:
8128 offset = -4;
8129 break;
8130 case 3:
8131 offset = 0;
8132 break;
8133 default:
8134 abort();
8135 }
8136 tcg_gen_addi_i32(addr, addr, offset);
8137 tmp = tcg_const_i32(mode);
8138 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8139 tcg_temp_free_i32(tmp);
8140 }
8141 tcg_temp_free_i32(addr);
dcba3a8d 8142 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8143}
8144
f4df2210 8145static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8146{
f4df2210 8147 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8148 TCGv_i32 tmp;
8149 TCGv_i32 tmp2;
8150 TCGv_i32 tmp3;
8151 TCGv_i32 addr;
a7812ae4 8152 TCGv_i64 tmp64;
9ee6e8bb 8153
e13886e3
PM
8154 /* M variants do not implement ARM mode; this must raise the INVSTATE
8155 * UsageFault exception.
8156 */
b53d8923 8157 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8158 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8159 default_exception_el(s));
8160 return;
b53d8923 8161 }
9ee6e8bb
PB
8162 cond = insn >> 28;
8163 if (cond == 0xf){
be5e7a76
DES
8164 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8165 * choose to UNDEF. In ARMv5 and above the space is used
8166 * for miscellaneous unconditional instructions.
8167 */
8168 ARCH(5);
8169
9ee6e8bb
PB
8170 /* Unconditional instructions. */
8171 if (((insn >> 25) & 7) == 1) {
8172 /* NEON Data processing. */
d614a513 8173 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8174 goto illegal_op;
d614a513 8175 }
9ee6e8bb 8176
7dcc1f89 8177 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8178 goto illegal_op;
7dcc1f89 8179 }
9ee6e8bb
PB
8180 return;
8181 }
8182 if ((insn & 0x0f100000) == 0x04000000) {
8183 /* NEON load/store. */
d614a513 8184 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8185 goto illegal_op;
d614a513 8186 }
9ee6e8bb 8187
7dcc1f89 8188 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8189 goto illegal_op;
7dcc1f89 8190 }
9ee6e8bb
PB
8191 return;
8192 }
6a57f3eb
WN
8193 if ((insn & 0x0f000e10) == 0x0e000a00) {
8194 /* VFP. */
7dcc1f89 8195 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8196 goto illegal_op;
8197 }
8198 return;
8199 }
3d185e5d
PM
8200 if (((insn & 0x0f30f000) == 0x0510f000) ||
8201 ((insn & 0x0f30f010) == 0x0710f000)) {
8202 if ((insn & (1 << 22)) == 0) {
8203 /* PLDW; v7MP */
d614a513 8204 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8205 goto illegal_op;
8206 }
8207 }
8208 /* Otherwise PLD; v5TE+ */
be5e7a76 8209 ARCH(5TE);
3d185e5d
PM
8210 return;
8211 }
8212 if (((insn & 0x0f70f000) == 0x0450f000) ||
8213 ((insn & 0x0f70f010) == 0x0650f000)) {
8214 ARCH(7);
8215 return; /* PLI; V7 */
8216 }
8217 if (((insn & 0x0f700000) == 0x04100000) ||
8218 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8219 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8220 goto illegal_op;
8221 }
8222 return; /* v7MP: Unallocated memory hint: must NOP */
8223 }
8224
8225 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8226 ARCH(6);
8227 /* setend */
9886ecdf
PB
8228 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8229 gen_helper_setend(cpu_env);
dcba3a8d 8230 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8231 }
8232 return;
8233 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8234 switch ((insn >> 4) & 0xf) {
8235 case 1: /* clrex */
8236 ARCH(6K);
426f5abc 8237 gen_clrex(s);
9ee6e8bb
PB
8238 return;
8239 case 4: /* dsb */
8240 case 5: /* dmb */
9ee6e8bb 8241 ARCH(7);
61e4c432 8242 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8243 return;
6df99dec
SS
8244 case 6: /* isb */
8245 /* We need to break the TB after this insn to execute
8246 * self-modifying code correctly and also to take
8247 * any pending interrupts immediately.
8248 */
0b609cc1 8249 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8250 return;
9ee6e8bb
PB
8251 default:
8252 goto illegal_op;
8253 }
8254 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8255 /* srs */
81465888
PM
8256 ARCH(6);
8257 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8258 return;
ea825eee 8259 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8260 /* rfe */
c67b6b71 8261 int32_t offset;
9ee6e8bb
PB
8262 if (IS_USER(s))
8263 goto illegal_op;
8264 ARCH(6);
8265 rn = (insn >> 16) & 0xf;
b0109805 8266 addr = load_reg(s, rn);
9ee6e8bb
PB
8267 i = (insn >> 23) & 3;
8268 switch (i) {
b0109805 8269 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8270 case 1: offset = 0; break; /* IA */
8271 case 2: offset = -8; break; /* DB */
b0109805 8272 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8273 default: abort();
8274 }
8275 if (offset)
b0109805
PB
8276 tcg_gen_addi_i32(addr, addr, offset);
8277 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8278 tmp = tcg_temp_new_i32();
12dcc321 8279 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8280 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8281 tmp2 = tcg_temp_new_i32();
12dcc321 8282 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8283 if (insn & (1 << 21)) {
8284 /* Base writeback. */
8285 switch (i) {
b0109805 8286 case 0: offset = -8; break;
c67b6b71
FN
8287 case 1: offset = 4; break;
8288 case 2: offset = -4; break;
b0109805 8289 case 3: offset = 0; break;
9ee6e8bb
PB
8290 default: abort();
8291 }
8292 if (offset)
b0109805
PB
8293 tcg_gen_addi_i32(addr, addr, offset);
8294 store_reg(s, rn, addr);
8295 } else {
7d1b0095 8296 tcg_temp_free_i32(addr);
9ee6e8bb 8297 }
b0109805 8298 gen_rfe(s, tmp, tmp2);
c67b6b71 8299 return;
9ee6e8bb
PB
8300 } else if ((insn & 0x0e000000) == 0x0a000000) {
8301 /* branch link and change to thumb (blx <offset>) */
8302 int32_t offset;
8303
8304 val = (uint32_t)s->pc;
7d1b0095 8305 tmp = tcg_temp_new_i32();
d9ba4830
PB
8306 tcg_gen_movi_i32(tmp, val);
8307 store_reg(s, 14, tmp);
9ee6e8bb
PB
8308 /* Sign-extend the 24-bit offset */
8309 offset = (((int32_t)insn) << 8) >> 8;
8310 /* offset * 4 + bit24 * 2 + (thumb bit) */
8311 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8312 /* pipeline offset */
8313 val += 4;
be5e7a76 8314 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8315 gen_bx_im(s, val);
9ee6e8bb
PB
8316 return;
8317 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8318 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8319 /* iWMMXt register transfer. */
c0f4af17 8320 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8321 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8322 return;
c0f4af17
PM
8323 }
8324 }
9ee6e8bb
PB
8325 }
8326 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8327 /* Coprocessor double register transfer. */
be5e7a76 8328 ARCH(5TE);
9ee6e8bb
PB
8329 } else if ((insn & 0x0f000010) == 0x0e000010) {
8330 /* Additional coprocessor register transfer. */
7997d92f 8331 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8332 uint32_t mask;
8333 uint32_t val;
8334 /* cps (privileged) */
8335 if (IS_USER(s))
8336 return;
8337 mask = val = 0;
8338 if (insn & (1 << 19)) {
8339 if (insn & (1 << 8))
8340 mask |= CPSR_A;
8341 if (insn & (1 << 7))
8342 mask |= CPSR_I;
8343 if (insn & (1 << 6))
8344 mask |= CPSR_F;
8345 if (insn & (1 << 18))
8346 val |= mask;
8347 }
7997d92f 8348 if (insn & (1 << 17)) {
9ee6e8bb
PB
8349 mask |= CPSR_M;
8350 val |= (insn & 0x1f);
8351 }
8352 if (mask) {
2fbac54b 8353 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8354 }
8355 return;
8356 }
8357 goto illegal_op;
8358 }
8359 if (cond != 0xe) {
8360 /* if not always execute, we generate a conditional jump to
8361 next instruction */
8362 s->condlabel = gen_new_label();
39fb730a 8363 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8364 s->condjmp = 1;
8365 }
8366 if ((insn & 0x0f900000) == 0x03000000) {
8367 if ((insn & (1 << 21)) == 0) {
8368 ARCH(6T2);
8369 rd = (insn >> 12) & 0xf;
8370 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8371 if ((insn & (1 << 22)) == 0) {
8372 /* MOVW */
7d1b0095 8373 tmp = tcg_temp_new_i32();
5e3f878a 8374 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8375 } else {
8376 /* MOVT */
5e3f878a 8377 tmp = load_reg(s, rd);
86831435 8378 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8379 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8380 }
5e3f878a 8381 store_reg(s, rd, tmp);
9ee6e8bb
PB
8382 } else {
8383 if (((insn >> 12) & 0xf) != 0xf)
8384 goto illegal_op;
8385 if (((insn >> 16) & 0xf) == 0) {
8386 gen_nop_hint(s, insn & 0xff);
8387 } else {
8388 /* CPSR = immediate */
8389 val = insn & 0xff;
8390 shift = ((insn >> 8) & 0xf) * 2;
8391 if (shift)
8392 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8393 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8394 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8395 i, val)) {
9ee6e8bb 8396 goto illegal_op;
7dcc1f89 8397 }
9ee6e8bb
PB
8398 }
8399 }
8400 } else if ((insn & 0x0f900000) == 0x01000000
8401 && (insn & 0x00000090) != 0x00000090) {
8402 /* miscellaneous instructions */
8403 op1 = (insn >> 21) & 3;
8404 sh = (insn >> 4) & 0xf;
8405 rm = insn & 0xf;
8406 switch (sh) {
8bfd0550
PM
8407 case 0x0: /* MSR, MRS */
8408 if (insn & (1 << 9)) {
8409 /* MSR (banked) and MRS (banked) */
8410 int sysm = extract32(insn, 16, 4) |
8411 (extract32(insn, 8, 1) << 4);
8412 int r = extract32(insn, 22, 1);
8413
8414 if (op1 & 1) {
8415 /* MSR (banked) */
8416 gen_msr_banked(s, r, sysm, rm);
8417 } else {
8418 /* MRS (banked) */
8419 int rd = extract32(insn, 12, 4);
8420
8421 gen_mrs_banked(s, r, sysm, rd);
8422 }
8423 break;
8424 }
8425
8426 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8427 if (op1 & 1) {
8428 /* PSR = reg */
2fbac54b 8429 tmp = load_reg(s, rm);
9ee6e8bb 8430 i = ((op1 & 2) != 0);
7dcc1f89 8431 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8432 goto illegal_op;
8433 } else {
8434 /* reg = PSR */
8435 rd = (insn >> 12) & 0xf;
8436 if (op1 & 2) {
8437 if (IS_USER(s))
8438 goto illegal_op;
d9ba4830 8439 tmp = load_cpu_field(spsr);
9ee6e8bb 8440 } else {
7d1b0095 8441 tmp = tcg_temp_new_i32();
9ef39277 8442 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8443 }
d9ba4830 8444 store_reg(s, rd, tmp);
9ee6e8bb
PB
8445 }
8446 break;
8447 case 0x1:
8448 if (op1 == 1) {
8449 /* branch/exchange thumb (bx). */
be5e7a76 8450 ARCH(4T);
d9ba4830
PB
8451 tmp = load_reg(s, rm);
8452 gen_bx(s, tmp);
9ee6e8bb
PB
8453 } else if (op1 == 3) {
8454 /* clz */
be5e7a76 8455 ARCH(5);
9ee6e8bb 8456 rd = (insn >> 12) & 0xf;
1497c961 8457 tmp = load_reg(s, rm);
7539a012 8458 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8459 store_reg(s, rd, tmp);
9ee6e8bb
PB
8460 } else {
8461 goto illegal_op;
8462 }
8463 break;
8464 case 0x2:
8465 if (op1 == 1) {
8466 ARCH(5J); /* bxj */
8467 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8468 tmp = load_reg(s, rm);
8469 gen_bx(s, tmp);
9ee6e8bb
PB
8470 } else {
8471 goto illegal_op;
8472 }
8473 break;
8474 case 0x3:
8475 if (op1 != 1)
8476 goto illegal_op;
8477
be5e7a76 8478 ARCH(5);
9ee6e8bb 8479 /* branch link/exchange thumb (blx) */
d9ba4830 8480 tmp = load_reg(s, rm);
7d1b0095 8481 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8482 tcg_gen_movi_i32(tmp2, s->pc);
8483 store_reg(s, 14, tmp2);
8484 gen_bx(s, tmp);
9ee6e8bb 8485 break;
eb0ecd5a
WN
8486 case 0x4:
8487 {
8488 /* crc32/crc32c */
8489 uint32_t c = extract32(insn, 8, 4);
8490
8491 /* Check this CPU supports ARMv8 CRC instructions.
8492 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8493 * Bits 8, 10 and 11 should be zero.
8494 */
d614a513 8495 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8496 (c & 0xd) != 0) {
8497 goto illegal_op;
8498 }
8499
8500 rn = extract32(insn, 16, 4);
8501 rd = extract32(insn, 12, 4);
8502
8503 tmp = load_reg(s, rn);
8504 tmp2 = load_reg(s, rm);
aa633469
PM
8505 if (op1 == 0) {
8506 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8507 } else if (op1 == 1) {
8508 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8509 }
eb0ecd5a
WN
8510 tmp3 = tcg_const_i32(1 << op1);
8511 if (c & 0x2) {
8512 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8513 } else {
8514 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8515 }
8516 tcg_temp_free_i32(tmp2);
8517 tcg_temp_free_i32(tmp3);
8518 store_reg(s, rd, tmp);
8519 break;
8520 }
9ee6e8bb 8521 case 0x5: /* saturating add/subtract */
be5e7a76 8522 ARCH(5TE);
9ee6e8bb
PB
8523 rd = (insn >> 12) & 0xf;
8524 rn = (insn >> 16) & 0xf;
b40d0353 8525 tmp = load_reg(s, rm);
5e3f878a 8526 tmp2 = load_reg(s, rn);
9ee6e8bb 8527 if (op1 & 2)
9ef39277 8528 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8529 if (op1 & 1)
9ef39277 8530 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8531 else
9ef39277 8532 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8533 tcg_temp_free_i32(tmp2);
5e3f878a 8534 store_reg(s, rd, tmp);
9ee6e8bb 8535 break;
49e14940 8536 case 7:
d4a2dc67
PM
8537 {
8538 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8539 switch (op1) {
19a6e31c
PM
8540 case 0:
8541 /* HLT */
8542 gen_hlt(s, imm16);
8543 break;
37e6456e
PM
8544 case 1:
8545 /* bkpt */
8546 ARCH(5);
8547 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8548 syn_aa32_bkpt(imm16, false),
8549 default_exception_el(s));
37e6456e
PM
8550 break;
8551 case 2:
8552 /* Hypervisor call (v7) */
8553 ARCH(7);
8554 if (IS_USER(s)) {
8555 goto illegal_op;
8556 }
8557 gen_hvc(s, imm16);
8558 break;
8559 case 3:
8560 /* Secure monitor call (v6+) */
8561 ARCH(6K);
8562 if (IS_USER(s)) {
8563 goto illegal_op;
8564 }
8565 gen_smc(s);
8566 break;
8567 default:
19a6e31c 8568 g_assert_not_reached();
49e14940 8569 }
9ee6e8bb 8570 break;
d4a2dc67 8571 }
9ee6e8bb
PB
8572 case 0x8: /* signed multiply */
8573 case 0xa:
8574 case 0xc:
8575 case 0xe:
be5e7a76 8576 ARCH(5TE);
9ee6e8bb
PB
8577 rs = (insn >> 8) & 0xf;
8578 rn = (insn >> 12) & 0xf;
8579 rd = (insn >> 16) & 0xf;
8580 if (op1 == 1) {
8581 /* (32 * 16) >> 16 */
5e3f878a
PB
8582 tmp = load_reg(s, rm);
8583 tmp2 = load_reg(s, rs);
9ee6e8bb 8584 if (sh & 4)
5e3f878a 8585 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8586 else
5e3f878a 8587 gen_sxth(tmp2);
a7812ae4
PB
8588 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8589 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8590 tmp = tcg_temp_new_i32();
ecc7b3aa 8591 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8592 tcg_temp_free_i64(tmp64);
9ee6e8bb 8593 if ((sh & 2) == 0) {
5e3f878a 8594 tmp2 = load_reg(s, rn);
9ef39277 8595 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8596 tcg_temp_free_i32(tmp2);
9ee6e8bb 8597 }
5e3f878a 8598 store_reg(s, rd, tmp);
9ee6e8bb
PB
8599 } else {
8600 /* 16 * 16 */
5e3f878a
PB
8601 tmp = load_reg(s, rm);
8602 tmp2 = load_reg(s, rs);
8603 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8604 tcg_temp_free_i32(tmp2);
9ee6e8bb 8605 if (op1 == 2) {
a7812ae4
PB
8606 tmp64 = tcg_temp_new_i64();
8607 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8608 tcg_temp_free_i32(tmp);
a7812ae4
PB
8609 gen_addq(s, tmp64, rn, rd);
8610 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8611 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8612 } else {
8613 if (op1 == 0) {
5e3f878a 8614 tmp2 = load_reg(s, rn);
9ef39277 8615 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8616 tcg_temp_free_i32(tmp2);
9ee6e8bb 8617 }
5e3f878a 8618 store_reg(s, rd, tmp);
9ee6e8bb
PB
8619 }
8620 }
8621 break;
8622 default:
8623 goto illegal_op;
8624 }
8625 } else if (((insn & 0x0e000000) == 0 &&
8626 (insn & 0x00000090) != 0x90) ||
8627 ((insn & 0x0e000000) == (1 << 25))) {
8628 int set_cc, logic_cc, shiftop;
8629
8630 op1 = (insn >> 21) & 0xf;
8631 set_cc = (insn >> 20) & 1;
8632 logic_cc = table_logic_cc[op1] & set_cc;
8633
8634 /* data processing instruction */
8635 if (insn & (1 << 25)) {
8636 /* immediate operand */
8637 val = insn & 0xff;
8638 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8639 if (shift) {
9ee6e8bb 8640 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8641 }
7d1b0095 8642 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8643 tcg_gen_movi_i32(tmp2, val);
8644 if (logic_cc && shift) {
8645 gen_set_CF_bit31(tmp2);
8646 }
9ee6e8bb
PB
8647 } else {
8648 /* register */
8649 rm = (insn) & 0xf;
e9bb4aa9 8650 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8651 shiftop = (insn >> 5) & 3;
8652 if (!(insn & (1 << 4))) {
8653 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8654 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8655 } else {
8656 rs = (insn >> 8) & 0xf;
8984bd2e 8657 tmp = load_reg(s, rs);
e9bb4aa9 8658 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8659 }
8660 }
8661 if (op1 != 0x0f && op1 != 0x0d) {
8662 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8663 tmp = load_reg(s, rn);
8664 } else {
39d5492a 8665 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8666 }
8667 rd = (insn >> 12) & 0xf;
8668 switch(op1) {
8669 case 0x00:
e9bb4aa9
JR
8670 tcg_gen_and_i32(tmp, tmp, tmp2);
8671 if (logic_cc) {
8672 gen_logic_CC(tmp);
8673 }
7dcc1f89 8674 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8675 break;
8676 case 0x01:
e9bb4aa9
JR
8677 tcg_gen_xor_i32(tmp, tmp, tmp2);
8678 if (logic_cc) {
8679 gen_logic_CC(tmp);
8680 }
7dcc1f89 8681 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8682 break;
8683 case 0x02:
8684 if (set_cc && rd == 15) {
8685 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8686 if (IS_USER(s)) {
9ee6e8bb 8687 goto illegal_op;
e9bb4aa9 8688 }
72485ec4 8689 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8690 gen_exception_return(s, tmp);
9ee6e8bb 8691 } else {
e9bb4aa9 8692 if (set_cc) {
72485ec4 8693 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8694 } else {
8695 tcg_gen_sub_i32(tmp, tmp, tmp2);
8696 }
7dcc1f89 8697 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8698 }
8699 break;
8700 case 0x03:
e9bb4aa9 8701 if (set_cc) {
72485ec4 8702 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8703 } else {
8704 tcg_gen_sub_i32(tmp, tmp2, tmp);
8705 }
7dcc1f89 8706 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8707 break;
8708 case 0x04:
e9bb4aa9 8709 if (set_cc) {
72485ec4 8710 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8711 } else {
8712 tcg_gen_add_i32(tmp, tmp, tmp2);
8713 }
7dcc1f89 8714 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8715 break;
8716 case 0x05:
e9bb4aa9 8717 if (set_cc) {
49b4c31e 8718 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8719 } else {
8720 gen_add_carry(tmp, tmp, tmp2);
8721 }
7dcc1f89 8722 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8723 break;
8724 case 0x06:
e9bb4aa9 8725 if (set_cc) {
2de68a49 8726 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8727 } else {
8728 gen_sub_carry(tmp, tmp, tmp2);
8729 }
7dcc1f89 8730 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8731 break;
8732 case 0x07:
e9bb4aa9 8733 if (set_cc) {
2de68a49 8734 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8735 } else {
8736 gen_sub_carry(tmp, tmp2, tmp);
8737 }
7dcc1f89 8738 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8739 break;
8740 case 0x08:
8741 if (set_cc) {
e9bb4aa9
JR
8742 tcg_gen_and_i32(tmp, tmp, tmp2);
8743 gen_logic_CC(tmp);
9ee6e8bb 8744 }
7d1b0095 8745 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8746 break;
8747 case 0x09:
8748 if (set_cc) {
e9bb4aa9
JR
8749 tcg_gen_xor_i32(tmp, tmp, tmp2);
8750 gen_logic_CC(tmp);
9ee6e8bb 8751 }
7d1b0095 8752 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8753 break;
8754 case 0x0a:
8755 if (set_cc) {
72485ec4 8756 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8757 }
7d1b0095 8758 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8759 break;
8760 case 0x0b:
8761 if (set_cc) {
72485ec4 8762 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8763 }
7d1b0095 8764 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8765 break;
8766 case 0x0c:
e9bb4aa9
JR
8767 tcg_gen_or_i32(tmp, tmp, tmp2);
8768 if (logic_cc) {
8769 gen_logic_CC(tmp);
8770 }
7dcc1f89 8771 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8772 break;
8773 case 0x0d:
8774 if (logic_cc && rd == 15) {
8775 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8776 if (IS_USER(s)) {
9ee6e8bb 8777 goto illegal_op;
e9bb4aa9
JR
8778 }
8779 gen_exception_return(s, tmp2);
9ee6e8bb 8780 } else {
e9bb4aa9
JR
8781 if (logic_cc) {
8782 gen_logic_CC(tmp2);
8783 }
7dcc1f89 8784 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8785 }
8786 break;
8787 case 0x0e:
f669df27 8788 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8789 if (logic_cc) {
8790 gen_logic_CC(tmp);
8791 }
7dcc1f89 8792 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8793 break;
8794 default:
8795 case 0x0f:
e9bb4aa9
JR
8796 tcg_gen_not_i32(tmp2, tmp2);
8797 if (logic_cc) {
8798 gen_logic_CC(tmp2);
8799 }
7dcc1f89 8800 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8801 break;
8802 }
e9bb4aa9 8803 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8804 tcg_temp_free_i32(tmp2);
e9bb4aa9 8805 }
9ee6e8bb
PB
8806 } else {
8807 /* other instructions */
8808 op1 = (insn >> 24) & 0xf;
8809 switch(op1) {
8810 case 0x0:
8811 case 0x1:
8812 /* multiplies, extra load/stores */
8813 sh = (insn >> 5) & 3;
8814 if (sh == 0) {
8815 if (op1 == 0x0) {
8816 rd = (insn >> 16) & 0xf;
8817 rn = (insn >> 12) & 0xf;
8818 rs = (insn >> 8) & 0xf;
8819 rm = (insn) & 0xf;
8820 op1 = (insn >> 20) & 0xf;
8821 switch (op1) {
8822 case 0: case 1: case 2: case 3: case 6:
8823 /* 32 bit mul */
5e3f878a
PB
8824 tmp = load_reg(s, rs);
8825 tmp2 = load_reg(s, rm);
8826 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8827 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8828 if (insn & (1 << 22)) {
8829 /* Subtract (mls) */
8830 ARCH(6T2);
5e3f878a
PB
8831 tmp2 = load_reg(s, rn);
8832 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8833 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8834 } else if (insn & (1 << 21)) {
8835 /* Add */
5e3f878a
PB
8836 tmp2 = load_reg(s, rn);
8837 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8838 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8839 }
8840 if (insn & (1 << 20))
5e3f878a
PB
8841 gen_logic_CC(tmp);
8842 store_reg(s, rd, tmp);
9ee6e8bb 8843 break;
8aac08b1
AJ
8844 case 4:
8845 /* 64 bit mul double accumulate (UMAAL) */
8846 ARCH(6);
8847 tmp = load_reg(s, rs);
8848 tmp2 = load_reg(s, rm);
8849 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8850 gen_addq_lo(s, tmp64, rn);
8851 gen_addq_lo(s, tmp64, rd);
8852 gen_storeq_reg(s, rn, rd, tmp64);
8853 tcg_temp_free_i64(tmp64);
8854 break;
8855 case 8: case 9: case 10: case 11:
8856 case 12: case 13: case 14: case 15:
8857 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8858 tmp = load_reg(s, rs);
8859 tmp2 = load_reg(s, rm);
8aac08b1 8860 if (insn & (1 << 22)) {
c9f10124 8861 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8862 } else {
c9f10124 8863 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8864 }
8865 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8866 TCGv_i32 al = load_reg(s, rn);
8867 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8868 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8869 tcg_temp_free_i32(al);
8870 tcg_temp_free_i32(ah);
9ee6e8bb 8871 }
8aac08b1 8872 if (insn & (1 << 20)) {
c9f10124 8873 gen_logicq_cc(tmp, tmp2);
8aac08b1 8874 }
c9f10124
RH
8875 store_reg(s, rn, tmp);
8876 store_reg(s, rd, tmp2);
9ee6e8bb 8877 break;
8aac08b1
AJ
8878 default:
8879 goto illegal_op;
9ee6e8bb
PB
8880 }
8881 } else {
8882 rn = (insn >> 16) & 0xf;
8883 rd = (insn >> 12) & 0xf;
8884 if (insn & (1 << 23)) {
8885 /* load/store exclusive */
2359bf80 8886 int op2 = (insn >> 8) & 3;
86753403 8887 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8888
8889 switch (op2) {
8890 case 0: /* lda/stl */
8891 if (op1 == 1) {
8892 goto illegal_op;
8893 }
8894 ARCH(8);
8895 break;
8896 case 1: /* reserved */
8897 goto illegal_op;
8898 case 2: /* ldaex/stlex */
8899 ARCH(8);
8900 break;
8901 case 3: /* ldrex/strex */
8902 if (op1) {
8903 ARCH(6K);
8904 } else {
8905 ARCH(6);
8906 }
8907 break;
8908 }
8909
3174f8e9 8910 addr = tcg_temp_local_new_i32();
98a46317 8911 load_reg_var(s, addr, rn);
2359bf80
MR
8912
8913 /* Since the emulation does not have barriers,
8914 the acquire/release semantics need no special
8915 handling */
8916 if (op2 == 0) {
8917 if (insn & (1 << 20)) {
8918 tmp = tcg_temp_new_i32();
8919 switch (op1) {
8920 case 0: /* lda */
9bb6558a
PM
8921 gen_aa32_ld32u_iss(s, tmp, addr,
8922 get_mem_index(s),
8923 rd | ISSIsAcqRel);
2359bf80
MR
8924 break;
8925 case 2: /* ldab */
9bb6558a
PM
8926 gen_aa32_ld8u_iss(s, tmp, addr,
8927 get_mem_index(s),
8928 rd | ISSIsAcqRel);
2359bf80
MR
8929 break;
8930 case 3: /* ldah */
9bb6558a
PM
8931 gen_aa32_ld16u_iss(s, tmp, addr,
8932 get_mem_index(s),
8933 rd | ISSIsAcqRel);
2359bf80
MR
8934 break;
8935 default:
8936 abort();
8937 }
8938 store_reg(s, rd, tmp);
8939 } else {
8940 rm = insn & 0xf;
8941 tmp = load_reg(s, rm);
8942 switch (op1) {
8943 case 0: /* stl */
9bb6558a
PM
8944 gen_aa32_st32_iss(s, tmp, addr,
8945 get_mem_index(s),
8946 rm | ISSIsAcqRel);
2359bf80
MR
8947 break;
8948 case 2: /* stlb */
9bb6558a
PM
8949 gen_aa32_st8_iss(s, tmp, addr,
8950 get_mem_index(s),
8951 rm | ISSIsAcqRel);
2359bf80
MR
8952 break;
8953 case 3: /* stlh */
9bb6558a
PM
8954 gen_aa32_st16_iss(s, tmp, addr,
8955 get_mem_index(s),
8956 rm | ISSIsAcqRel);
2359bf80
MR
8957 break;
8958 default:
8959 abort();
8960 }
8961 tcg_temp_free_i32(tmp);
8962 }
8963 } else if (insn & (1 << 20)) {
86753403
PB
8964 switch (op1) {
8965 case 0: /* ldrex */
426f5abc 8966 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8967 break;
8968 case 1: /* ldrexd */
426f5abc 8969 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8970 break;
8971 case 2: /* ldrexb */
426f5abc 8972 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8973 break;
8974 case 3: /* ldrexh */
426f5abc 8975 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8976 break;
8977 default:
8978 abort();
8979 }
9ee6e8bb
PB
8980 } else {
8981 rm = insn & 0xf;
86753403
PB
8982 switch (op1) {
8983 case 0: /* strex */
426f5abc 8984 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8985 break;
8986 case 1: /* strexd */
502e64fe 8987 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8988 break;
8989 case 2: /* strexb */
426f5abc 8990 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8991 break;
8992 case 3: /* strexh */
426f5abc 8993 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8994 break;
8995 default:
8996 abort();
8997 }
9ee6e8bb 8998 }
39d5492a 8999 tcg_temp_free_i32(addr);
9ee6e8bb 9000 } else {
cf12bce0
EC
9001 TCGv taddr;
9002 TCGMemOp opc = s->be_data;
9003
9ee6e8bb
PB
9004 /* SWP instruction */
9005 rm = (insn) & 0xf;
9006
9ee6e8bb 9007 if (insn & (1 << 22)) {
cf12bce0 9008 opc |= MO_UB;
9ee6e8bb 9009 } else {
cf12bce0 9010 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9011 }
cf12bce0
EC
9012
9013 addr = load_reg(s, rn);
9014 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9015 tcg_temp_free_i32(addr);
cf12bce0
EC
9016
9017 tmp = load_reg(s, rm);
9018 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9019 get_mem_index(s), opc);
9020 tcg_temp_free(taddr);
9021 store_reg(s, rd, tmp);
9ee6e8bb
PB
9022 }
9023 }
9024 } else {
9025 int address_offset;
3960c336 9026 bool load = insn & (1 << 20);
63f26fcf
PM
9027 bool wbit = insn & (1 << 21);
9028 bool pbit = insn & (1 << 24);
3960c336 9029 bool doubleword = false;
9bb6558a
PM
9030 ISSInfo issinfo;
9031
9ee6e8bb
PB
9032 /* Misc load/store */
9033 rn = (insn >> 16) & 0xf;
9034 rd = (insn >> 12) & 0xf;
3960c336 9035
9bb6558a
PM
9036 /* ISS not valid if writeback */
9037 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9038
3960c336
PM
9039 if (!load && (sh & 2)) {
9040 /* doubleword */
9041 ARCH(5TE);
9042 if (rd & 1) {
9043 /* UNPREDICTABLE; we choose to UNDEF */
9044 goto illegal_op;
9045 }
9046 load = (sh & 1) == 0;
9047 doubleword = true;
9048 }
9049
b0109805 9050 addr = load_reg(s, rn);
63f26fcf 9051 if (pbit) {
b0109805 9052 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9053 }
9ee6e8bb 9054 address_offset = 0;
3960c336
PM
9055
9056 if (doubleword) {
9057 if (!load) {
9ee6e8bb 9058 /* store */
b0109805 9059 tmp = load_reg(s, rd);
12dcc321 9060 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9061 tcg_temp_free_i32(tmp);
b0109805
PB
9062 tcg_gen_addi_i32(addr, addr, 4);
9063 tmp = load_reg(s, rd + 1);
12dcc321 9064 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9065 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9066 } else {
9067 /* load */
5a839c0d 9068 tmp = tcg_temp_new_i32();
12dcc321 9069 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9070 store_reg(s, rd, tmp);
9071 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9072 tmp = tcg_temp_new_i32();
12dcc321 9073 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9074 rd++;
9ee6e8bb
PB
9075 }
9076 address_offset = -4;
3960c336
PM
9077 } else if (load) {
9078 /* load */
9079 tmp = tcg_temp_new_i32();
9080 switch (sh) {
9081 case 1:
9bb6558a
PM
9082 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9083 issinfo);
3960c336
PM
9084 break;
9085 case 2:
9bb6558a
PM
9086 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9087 issinfo);
3960c336
PM
9088 break;
9089 default:
9090 case 3:
9bb6558a
PM
9091 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9092 issinfo);
3960c336
PM
9093 break;
9094 }
9ee6e8bb
PB
9095 } else {
9096 /* store */
b0109805 9097 tmp = load_reg(s, rd);
9bb6558a 9098 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9099 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9100 }
9101 /* Perform base writeback before the loaded value to
9102 ensure correct behavior with overlapping index registers.
b6af0975 9103 ldrd with base writeback is undefined if the
9ee6e8bb 9104 destination and index registers overlap. */
63f26fcf 9105 if (!pbit) {
b0109805
PB
9106 gen_add_datah_offset(s, insn, address_offset, addr);
9107 store_reg(s, rn, addr);
63f26fcf 9108 } else if (wbit) {
9ee6e8bb 9109 if (address_offset)
b0109805
PB
9110 tcg_gen_addi_i32(addr, addr, address_offset);
9111 store_reg(s, rn, addr);
9112 } else {
7d1b0095 9113 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9114 }
9115 if (load) {
9116 /* Complete the load. */
b0109805 9117 store_reg(s, rd, tmp);
9ee6e8bb
PB
9118 }
9119 }
9120 break;
9121 case 0x4:
9122 case 0x5:
9123 goto do_ldst;
9124 case 0x6:
9125 case 0x7:
9126 if (insn & (1 << 4)) {
9127 ARCH(6);
9128 /* Armv6 Media instructions. */
9129 rm = insn & 0xf;
9130 rn = (insn >> 16) & 0xf;
2c0262af 9131 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9132 rs = (insn >> 8) & 0xf;
9133 switch ((insn >> 23) & 3) {
9134 case 0: /* Parallel add/subtract. */
9135 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9136 tmp = load_reg(s, rn);
9137 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9138 sh = (insn >> 5) & 7;
9139 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9140 goto illegal_op;
6ddbc6e4 9141 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9142 tcg_temp_free_i32(tmp2);
6ddbc6e4 9143 store_reg(s, rd, tmp);
9ee6e8bb
PB
9144 break;
9145 case 1:
9146 if ((insn & 0x00700020) == 0) {
6c95676b 9147 /* Halfword pack. */
3670669c
PB
9148 tmp = load_reg(s, rn);
9149 tmp2 = load_reg(s, rm);
9ee6e8bb 9150 shift = (insn >> 7) & 0x1f;
3670669c
PB
9151 if (insn & (1 << 6)) {
9152 /* pkhtb */
22478e79
AZ
9153 if (shift == 0)
9154 shift = 31;
9155 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9156 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9157 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9158 } else {
9159 /* pkhbt */
22478e79
AZ
9160 if (shift)
9161 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9162 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9163 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9164 }
9165 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9166 tcg_temp_free_i32(tmp2);
3670669c 9167 store_reg(s, rd, tmp);
9ee6e8bb
PB
9168 } else if ((insn & 0x00200020) == 0x00200000) {
9169 /* [us]sat */
6ddbc6e4 9170 tmp = load_reg(s, rm);
9ee6e8bb
PB
9171 shift = (insn >> 7) & 0x1f;
9172 if (insn & (1 << 6)) {
9173 if (shift == 0)
9174 shift = 31;
6ddbc6e4 9175 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9176 } else {
6ddbc6e4 9177 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9178 }
9179 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9180 tmp2 = tcg_const_i32(sh);
9181 if (insn & (1 << 22))
9ef39277 9182 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9183 else
9ef39277 9184 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9185 tcg_temp_free_i32(tmp2);
6ddbc6e4 9186 store_reg(s, rd, tmp);
9ee6e8bb
PB
9187 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9188 /* [us]sat16 */
6ddbc6e4 9189 tmp = load_reg(s, rm);
9ee6e8bb 9190 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9191 tmp2 = tcg_const_i32(sh);
9192 if (insn & (1 << 22))
9ef39277 9193 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9194 else
9ef39277 9195 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9196 tcg_temp_free_i32(tmp2);
6ddbc6e4 9197 store_reg(s, rd, tmp);
9ee6e8bb
PB
9198 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9199 /* Select bytes. */
6ddbc6e4
PB
9200 tmp = load_reg(s, rn);
9201 tmp2 = load_reg(s, rm);
7d1b0095 9202 tmp3 = tcg_temp_new_i32();
0ecb72a5 9203 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9204 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9205 tcg_temp_free_i32(tmp3);
9206 tcg_temp_free_i32(tmp2);
6ddbc6e4 9207 store_reg(s, rd, tmp);
9ee6e8bb 9208 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9209 tmp = load_reg(s, rm);
9ee6e8bb 9210 shift = (insn >> 10) & 3;
1301f322 9211 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9212 rotate, a shift is sufficient. */
9213 if (shift != 0)
f669df27 9214 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9215 op1 = (insn >> 20) & 7;
9216 switch (op1) {
5e3f878a
PB
9217 case 0: gen_sxtb16(tmp); break;
9218 case 2: gen_sxtb(tmp); break;
9219 case 3: gen_sxth(tmp); break;
9220 case 4: gen_uxtb16(tmp); break;
9221 case 6: gen_uxtb(tmp); break;
9222 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9223 default: goto illegal_op;
9224 }
9225 if (rn != 15) {
5e3f878a 9226 tmp2 = load_reg(s, rn);
9ee6e8bb 9227 if ((op1 & 3) == 0) {
5e3f878a 9228 gen_add16(tmp, tmp2);
9ee6e8bb 9229 } else {
5e3f878a 9230 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9231 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9232 }
9233 }
6c95676b 9234 store_reg(s, rd, tmp);
9ee6e8bb
PB
9235 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9236 /* rev */
b0109805 9237 tmp = load_reg(s, rm);
9ee6e8bb
PB
9238 if (insn & (1 << 22)) {
9239 if (insn & (1 << 7)) {
b0109805 9240 gen_revsh(tmp);
9ee6e8bb
PB
9241 } else {
9242 ARCH(6T2);
b0109805 9243 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9244 }
9245 } else {
9246 if (insn & (1 << 7))
b0109805 9247 gen_rev16(tmp);
9ee6e8bb 9248 else
66896cb8 9249 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9250 }
b0109805 9251 store_reg(s, rd, tmp);
9ee6e8bb
PB
9252 } else {
9253 goto illegal_op;
9254 }
9255 break;
9256 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9257 switch ((insn >> 20) & 0x7) {
9258 case 5:
9259 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9260 /* op2 not 00x or 11x : UNDEF */
9261 goto illegal_op;
9262 }
838fa72d
AJ
9263 /* Signed multiply most significant [accumulate].
9264 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9265 tmp = load_reg(s, rm);
9266 tmp2 = load_reg(s, rs);
a7812ae4 9267 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9268
955a7dd5 9269 if (rd != 15) {
838fa72d 9270 tmp = load_reg(s, rd);
9ee6e8bb 9271 if (insn & (1 << 6)) {
838fa72d 9272 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9273 } else {
838fa72d 9274 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9275 }
9276 }
838fa72d
AJ
9277 if (insn & (1 << 5)) {
9278 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9279 }
9280 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9281 tmp = tcg_temp_new_i32();
ecc7b3aa 9282 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9283 tcg_temp_free_i64(tmp64);
955a7dd5 9284 store_reg(s, rn, tmp);
41e9564d
PM
9285 break;
9286 case 0:
9287 case 4:
9288 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9289 if (insn & (1 << 7)) {
9290 goto illegal_op;
9291 }
9292 tmp = load_reg(s, rm);
9293 tmp2 = load_reg(s, rs);
9ee6e8bb 9294 if (insn & (1 << 5))
5e3f878a
PB
9295 gen_swap_half(tmp2);
9296 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9297 if (insn & (1 << 22)) {
5e3f878a 9298 /* smlald, smlsld */
33bbd75a
PC
9299 TCGv_i64 tmp64_2;
9300
a7812ae4 9301 tmp64 = tcg_temp_new_i64();
33bbd75a 9302 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9303 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9304 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9305 tcg_temp_free_i32(tmp);
33bbd75a
PC
9306 tcg_temp_free_i32(tmp2);
9307 if (insn & (1 << 6)) {
9308 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9309 } else {
9310 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9311 }
9312 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9313 gen_addq(s, tmp64, rd, rn);
9314 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9315 tcg_temp_free_i64(tmp64);
9ee6e8bb 9316 } else {
5e3f878a 9317 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9318 if (insn & (1 << 6)) {
9319 /* This subtraction cannot overflow. */
9320 tcg_gen_sub_i32(tmp, tmp, tmp2);
9321 } else {
9322 /* This addition cannot overflow 32 bits;
9323 * however it may overflow considered as a
9324 * signed operation, in which case we must set
9325 * the Q flag.
9326 */
9327 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9328 }
9329 tcg_temp_free_i32(tmp2);
22478e79 9330 if (rd != 15)
9ee6e8bb 9331 {
22478e79 9332 tmp2 = load_reg(s, rd);
9ef39277 9333 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9334 tcg_temp_free_i32(tmp2);
9ee6e8bb 9335 }
22478e79 9336 store_reg(s, rn, tmp);
9ee6e8bb 9337 }
41e9564d 9338 break;
b8b8ea05
PM
9339 case 1:
9340 case 3:
9341 /* SDIV, UDIV */
d614a513 9342 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9343 goto illegal_op;
9344 }
9345 if (((insn >> 5) & 7) || (rd != 15)) {
9346 goto illegal_op;
9347 }
9348 tmp = load_reg(s, rm);
9349 tmp2 = load_reg(s, rs);
9350 if (insn & (1 << 21)) {
9351 gen_helper_udiv(tmp, tmp, tmp2);
9352 } else {
9353 gen_helper_sdiv(tmp, tmp, tmp2);
9354 }
9355 tcg_temp_free_i32(tmp2);
9356 store_reg(s, rn, tmp);
9357 break;
41e9564d
PM
9358 default:
9359 goto illegal_op;
9ee6e8bb
PB
9360 }
9361 break;
9362 case 3:
9363 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9364 switch (op1) {
9365 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9366 ARCH(6);
9367 tmp = load_reg(s, rm);
9368 tmp2 = load_reg(s, rs);
9369 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9370 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9371 if (rd != 15) {
9372 tmp2 = load_reg(s, rd);
6ddbc6e4 9373 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9374 tcg_temp_free_i32(tmp2);
9ee6e8bb 9375 }
ded9d295 9376 store_reg(s, rn, tmp);
9ee6e8bb
PB
9377 break;
9378 case 0x20: case 0x24: case 0x28: case 0x2c:
9379 /* Bitfield insert/clear. */
9380 ARCH(6T2);
9381 shift = (insn >> 7) & 0x1f;
9382 i = (insn >> 16) & 0x1f;
45140a57
KB
9383 if (i < shift) {
9384 /* UNPREDICTABLE; we choose to UNDEF */
9385 goto illegal_op;
9386 }
9ee6e8bb
PB
9387 i = i + 1 - shift;
9388 if (rm == 15) {
7d1b0095 9389 tmp = tcg_temp_new_i32();
5e3f878a 9390 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9391 } else {
5e3f878a 9392 tmp = load_reg(s, rm);
9ee6e8bb
PB
9393 }
9394 if (i != 32) {
5e3f878a 9395 tmp2 = load_reg(s, rd);
d593c48e 9396 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9397 tcg_temp_free_i32(tmp2);
9ee6e8bb 9398 }
5e3f878a 9399 store_reg(s, rd, tmp);
9ee6e8bb
PB
9400 break;
9401 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9402 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9403 ARCH(6T2);
5e3f878a 9404 tmp = load_reg(s, rm);
9ee6e8bb
PB
9405 shift = (insn >> 7) & 0x1f;
9406 i = ((insn >> 16) & 0x1f) + 1;
9407 if (shift + i > 32)
9408 goto illegal_op;
9409 if (i < 32) {
9410 if (op1 & 0x20) {
59a71b4c 9411 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9412 } else {
59a71b4c 9413 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9414 }
9415 }
5e3f878a 9416 store_reg(s, rd, tmp);
9ee6e8bb
PB
9417 break;
9418 default:
9419 goto illegal_op;
9420 }
9421 break;
9422 }
9423 break;
9424 }
9425 do_ldst:
9426 /* Check for undefined extension instructions
9427 * per the ARM Bible IE:
9428 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9429 */
9430 sh = (0xf << 20) | (0xf << 4);
9431 if (op1 == 0x7 && ((insn & sh) == sh))
9432 {
9433 goto illegal_op;
9434 }
9435 /* load/store byte/word */
9436 rn = (insn >> 16) & 0xf;
9437 rd = (insn >> 12) & 0xf;
b0109805 9438 tmp2 = load_reg(s, rn);
a99caa48
PM
9439 if ((insn & 0x01200000) == 0x00200000) {
9440 /* ldrt/strt */
579d21cc 9441 i = get_a32_user_mem_index(s);
a99caa48
PM
9442 } else {
9443 i = get_mem_index(s);
9444 }
9ee6e8bb 9445 if (insn & (1 << 24))
b0109805 9446 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9447 if (insn & (1 << 20)) {
9448 /* load */
5a839c0d 9449 tmp = tcg_temp_new_i32();
9ee6e8bb 9450 if (insn & (1 << 22)) {
9bb6558a 9451 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9452 } else {
9bb6558a 9453 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9454 }
9ee6e8bb
PB
9455 } else {
9456 /* store */
b0109805 9457 tmp = load_reg(s, rd);
5a839c0d 9458 if (insn & (1 << 22)) {
9bb6558a 9459 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9460 } else {
9bb6558a 9461 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9462 }
9463 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9464 }
9465 if (!(insn & (1 << 24))) {
b0109805
PB
9466 gen_add_data_offset(s, insn, tmp2);
9467 store_reg(s, rn, tmp2);
9468 } else if (insn & (1 << 21)) {
9469 store_reg(s, rn, tmp2);
9470 } else {
7d1b0095 9471 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9472 }
9473 if (insn & (1 << 20)) {
9474 /* Complete the load. */
7dcc1f89 9475 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9476 }
9477 break;
9478 case 0x08:
9479 case 0x09:
9480 {
da3e53dd
PM
9481 int j, n, loaded_base;
9482 bool exc_return = false;
9483 bool is_load = extract32(insn, 20, 1);
9484 bool user = false;
39d5492a 9485 TCGv_i32 loaded_var;
9ee6e8bb
PB
9486 /* load/store multiple words */
9487 /* XXX: store correct base if write back */
9ee6e8bb 9488 if (insn & (1 << 22)) {
da3e53dd 9489 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9490 if (IS_USER(s))
9491 goto illegal_op; /* only usable in supervisor mode */
9492
da3e53dd
PM
9493 if (is_load && extract32(insn, 15, 1)) {
9494 exc_return = true;
9495 } else {
9496 user = true;
9497 }
9ee6e8bb
PB
9498 }
9499 rn = (insn >> 16) & 0xf;
b0109805 9500 addr = load_reg(s, rn);
9ee6e8bb
PB
9501
9502 /* compute total size */
9503 loaded_base = 0;
39d5492a 9504 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9505 n = 0;
9506 for(i=0;i<16;i++) {
9507 if (insn & (1 << i))
9508 n++;
9509 }
9510 /* XXX: test invalid n == 0 case ? */
9511 if (insn & (1 << 23)) {
9512 if (insn & (1 << 24)) {
9513 /* pre increment */
b0109805 9514 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9515 } else {
9516 /* post increment */
9517 }
9518 } else {
9519 if (insn & (1 << 24)) {
9520 /* pre decrement */
b0109805 9521 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9522 } else {
9523 /* post decrement */
9524 if (n != 1)
b0109805 9525 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9526 }
9527 }
9528 j = 0;
9529 for(i=0;i<16;i++) {
9530 if (insn & (1 << i)) {
da3e53dd 9531 if (is_load) {
9ee6e8bb 9532 /* load */
5a839c0d 9533 tmp = tcg_temp_new_i32();
12dcc321 9534 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9535 if (user) {
b75263d6 9536 tmp2 = tcg_const_i32(i);
1ce94f81 9537 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9538 tcg_temp_free_i32(tmp2);
7d1b0095 9539 tcg_temp_free_i32(tmp);
9ee6e8bb 9540 } else if (i == rn) {
b0109805 9541 loaded_var = tmp;
9ee6e8bb 9542 loaded_base = 1;
fb0e8e79
PM
9543 } else if (rn == 15 && exc_return) {
9544 store_pc_exc_ret(s, tmp);
9ee6e8bb 9545 } else {
7dcc1f89 9546 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9547 }
9548 } else {
9549 /* store */
9550 if (i == 15) {
9551 /* special case: r15 = PC + 8 */
9552 val = (long)s->pc + 4;
7d1b0095 9553 tmp = tcg_temp_new_i32();
b0109805 9554 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9555 } else if (user) {
7d1b0095 9556 tmp = tcg_temp_new_i32();
b75263d6 9557 tmp2 = tcg_const_i32(i);
9ef39277 9558 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9559 tcg_temp_free_i32(tmp2);
9ee6e8bb 9560 } else {
b0109805 9561 tmp = load_reg(s, i);
9ee6e8bb 9562 }
12dcc321 9563 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9564 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9565 }
9566 j++;
9567 /* no need to add after the last transfer */
9568 if (j != n)
b0109805 9569 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9570 }
9571 }
9572 if (insn & (1 << 21)) {
9573 /* write back */
9574 if (insn & (1 << 23)) {
9575 if (insn & (1 << 24)) {
9576 /* pre increment */
9577 } else {
9578 /* post increment */
b0109805 9579 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9580 }
9581 } else {
9582 if (insn & (1 << 24)) {
9583 /* pre decrement */
9584 if (n != 1)
b0109805 9585 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9586 } else {
9587 /* post decrement */
b0109805 9588 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9589 }
9590 }
b0109805
PB
9591 store_reg(s, rn, addr);
9592 } else {
7d1b0095 9593 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9594 }
9595 if (loaded_base) {
b0109805 9596 store_reg(s, rn, loaded_var);
9ee6e8bb 9597 }
da3e53dd 9598 if (exc_return) {
9ee6e8bb 9599 /* Restore CPSR from SPSR. */
d9ba4830 9600 tmp = load_cpu_field(spsr);
235ea1f5 9601 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9602 tcg_temp_free_i32(tmp);
b29fd33d 9603 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9604 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9605 }
9606 }
9607 break;
9608 case 0xa:
9609 case 0xb:
9610 {
9611 int32_t offset;
9612
9613 /* branch (and link) */
9614 val = (int32_t)s->pc;
9615 if (insn & (1 << 24)) {
7d1b0095 9616 tmp = tcg_temp_new_i32();
5e3f878a
PB
9617 tcg_gen_movi_i32(tmp, val);
9618 store_reg(s, 14, tmp);
9ee6e8bb 9619 }
534df156
PM
9620 offset = sextract32(insn << 2, 0, 26);
9621 val += offset + 4;
9ee6e8bb
PB
9622 gen_jmp(s, val);
9623 }
9624 break;
9625 case 0xc:
9626 case 0xd:
9627 case 0xe:
6a57f3eb
WN
9628 if (((insn >> 8) & 0xe) == 10) {
9629 /* VFP. */
7dcc1f89 9630 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9631 goto illegal_op;
9632 }
7dcc1f89 9633 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9634 /* Coprocessor. */
9ee6e8bb 9635 goto illegal_op;
6a57f3eb 9636 }
9ee6e8bb
PB
9637 break;
9638 case 0xf:
9639 /* swi */
eaed129d 9640 gen_set_pc_im(s, s->pc);
d4a2dc67 9641 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9642 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9643 break;
9644 default:
9645 illegal_op:
73710361
GB
9646 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9647 default_exception_el(s));
9ee6e8bb
PB
9648 break;
9649 }
9650 }
9651}
9652
296e5a0a
PM
9653static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9654{
9655 /* Return true if this is a 16 bit instruction. We must be precise
9656 * about this (matching the decode). We assume that s->pc still
9657 * points to the first 16 bits of the insn.
9658 */
9659 if ((insn >> 11) < 0x1d) {
9660 /* Definitely a 16-bit instruction */
9661 return true;
9662 }
9663
9664 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9665 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9666 * end up actually treating this as two 16-bit insns, though,
9667 * if it's half of a bl/blx pair that might span a page boundary.
9668 */
9669 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9670 /* Thumb2 cores (including all M profile ones) always treat
9671 * 32-bit insns as 32-bit.
9672 */
9673 return false;
9674 }
9675
9676 if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
9677 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9678 * is not on the next page; we merge this into a 32-bit
9679 * insn.
9680 */
9681 return false;
9682 }
9683 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9684 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9685 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9686 * -- handle as single 16 bit insn
9687 */
9688 return true;
9689}
9690
9ee6e8bb
PB
/* Return true if this Thumb-2 data-processing opcode (0..7) is a logical op. */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9697
9698/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9699 then set condition code flags based on the result of the operation.
9700 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9701 to the high bit of T1.
9702 Returns zero if the opcode is valid. */
9703
9704static int
39d5492a
PM
9705gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9706 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9707{
9708 int logic_cc;
9709
9710 logic_cc = 0;
9711 switch (op) {
9712 case 0: /* and */
396e467c 9713 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9714 logic_cc = conds;
9715 break;
9716 case 1: /* bic */
f669df27 9717 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9718 logic_cc = conds;
9719 break;
9720 case 2: /* orr */
396e467c 9721 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9722 logic_cc = conds;
9723 break;
9724 case 3: /* orn */
29501f1b 9725 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9726 logic_cc = conds;
9727 break;
9728 case 4: /* eor */
396e467c 9729 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9730 logic_cc = conds;
9731 break;
9732 case 8: /* add */
9733 if (conds)
72485ec4 9734 gen_add_CC(t0, t0, t1);
9ee6e8bb 9735 else
396e467c 9736 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9737 break;
9738 case 10: /* adc */
9739 if (conds)
49b4c31e 9740 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9741 else
396e467c 9742 gen_adc(t0, t1);
9ee6e8bb
PB
9743 break;
9744 case 11: /* sbc */
2de68a49
RH
9745 if (conds) {
9746 gen_sbc_CC(t0, t0, t1);
9747 } else {
396e467c 9748 gen_sub_carry(t0, t0, t1);
2de68a49 9749 }
9ee6e8bb
PB
9750 break;
9751 case 13: /* sub */
9752 if (conds)
72485ec4 9753 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9754 else
396e467c 9755 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9756 break;
9757 case 14: /* rsb */
9758 if (conds)
72485ec4 9759 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9760 else
396e467c 9761 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9762 break;
9763 default: /* 5, 6, 7, 9, 12, 15. */
9764 return 1;
9765 }
9766 if (logic_cc) {
396e467c 9767 gen_logic_CC(t0);
9ee6e8bb 9768 if (shifter_out)
396e467c 9769 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9770 }
9771 return 0;
9772}
9773
9774/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9775 is not legal. */
296e5a0a 9776static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9777{
296e5a0a 9778 uint32_t imm, shift, offset;
9ee6e8bb 9779 uint32_t rd, rn, rm, rs;
39d5492a
PM
9780 TCGv_i32 tmp;
9781 TCGv_i32 tmp2;
9782 TCGv_i32 tmp3;
9783 TCGv_i32 addr;
a7812ae4 9784 TCGv_i64 tmp64;
9ee6e8bb
PB
9785 int op;
9786 int shiftop;
9787 int conds;
9788 int logic_cc;
9789
296e5a0a
PM
9790 /* The only 32 bit insn that's allowed for Thumb1 is the combined
9791 * BL/BLX prefix and suffix.
9792 */
9ee6e8bb
PB
9793 if ((insn & 0xf800e800) != 0xf000e800) {
9794 ARCH(6T2);
9795 }
9796
9797 rn = (insn >> 16) & 0xf;
9798 rs = (insn >> 12) & 0xf;
9799 rd = (insn >> 8) & 0xf;
9800 rm = insn & 0xf;
9801 switch ((insn >> 25) & 0xf) {
9802 case 0: case 1: case 2: case 3:
9803 /* 16-bit instructions. Should never happen. */
9804 abort();
9805 case 4:
9806 if (insn & (1 << 22)) {
ebfe27c5
PM
9807 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9808 * - load/store doubleword, load/store exclusive, ldacq/strel,
9809 * table branch.
9810 */
76eff04d
PM
9811 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9812 arm_dc_feature(s, ARM_FEATURE_V8)) {
9813 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9814 * - SG (v8M only)
9815 * The bulk of the behaviour for this instruction is implemented
9816 * in v7m_handle_execute_nsc(), which deals with the insn when
9817 * it is executed by a CPU in non-secure state from memory
9818 * which is Secure & NonSecure-Callable.
9819 * Here we only need to handle the remaining cases:
9820 * * in NS memory (including the "security extension not
9821 * implemented" case) : NOP
9822 * * in S memory but CPU already secure (clear IT bits)
9823 * We know that the attribute for the memory this insn is
9824 * in must match the current CPU state, because otherwise
9825 * get_phys_addr_pmsav8 would have generated an exception.
9826 */
9827 if (s->v8m_secure) {
9828 /* Like the IT insn, we don't need to generate any code */
9829 s->condexec_cond = 0;
9830 s->condexec_mask = 0;
9831 }
9832 } else if (insn & 0x01200000) {
ebfe27c5
PM
9833 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9834 * - load/store dual (post-indexed)
9835 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9836 * - load/store dual (literal and immediate)
9837 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9838 * - load/store dual (pre-indexed)
9839 */
9ee6e8bb 9840 if (rn == 15) {
ebfe27c5
PM
9841 if (insn & (1 << 21)) {
9842 /* UNPREDICTABLE */
9843 goto illegal_op;
9844 }
7d1b0095 9845 addr = tcg_temp_new_i32();
b0109805 9846 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9847 } else {
b0109805 9848 addr = load_reg(s, rn);
9ee6e8bb
PB
9849 }
9850 offset = (insn & 0xff) * 4;
9851 if ((insn & (1 << 23)) == 0)
9852 offset = -offset;
9853 if (insn & (1 << 24)) {
b0109805 9854 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9855 offset = 0;
9856 }
9857 if (insn & (1 << 20)) {
9858 /* ldrd */
e2592fad 9859 tmp = tcg_temp_new_i32();
12dcc321 9860 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9861 store_reg(s, rs, tmp);
9862 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9863 tmp = tcg_temp_new_i32();
12dcc321 9864 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9865 store_reg(s, rd, tmp);
9ee6e8bb
PB
9866 } else {
9867 /* strd */
b0109805 9868 tmp = load_reg(s, rs);
12dcc321 9869 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9870 tcg_temp_free_i32(tmp);
b0109805
PB
9871 tcg_gen_addi_i32(addr, addr, 4);
9872 tmp = load_reg(s, rd);
12dcc321 9873 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9874 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9875 }
9876 if (insn & (1 << 21)) {
9877 /* Base writeback. */
b0109805
PB
9878 tcg_gen_addi_i32(addr, addr, offset - 4);
9879 store_reg(s, rn, addr);
9880 } else {
7d1b0095 9881 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9882 }
9883 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9884 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9885 * - load/store exclusive word
9886 */
9887 if (rs == 15) {
9888 goto illegal_op;
9889 }
39d5492a 9890 addr = tcg_temp_local_new_i32();
98a46317 9891 load_reg_var(s, addr, rn);
426f5abc 9892 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9893 if (insn & (1 << 20)) {
426f5abc 9894 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9895 } else {
426f5abc 9896 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9897 }
39d5492a 9898 tcg_temp_free_i32(addr);
2359bf80 9899 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9900 /* Table Branch. */
9901 if (rn == 15) {
7d1b0095 9902 addr = tcg_temp_new_i32();
b0109805 9903 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9904 } else {
b0109805 9905 addr = load_reg(s, rn);
9ee6e8bb 9906 }
b26eefb6 9907 tmp = load_reg(s, rm);
b0109805 9908 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9909 if (insn & (1 << 4)) {
9910 /* tbh */
b0109805 9911 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9912 tcg_temp_free_i32(tmp);
e2592fad 9913 tmp = tcg_temp_new_i32();
12dcc321 9914 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9915 } else { /* tbb */
7d1b0095 9916 tcg_temp_free_i32(tmp);
e2592fad 9917 tmp = tcg_temp_new_i32();
12dcc321 9918 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9919 }
7d1b0095 9920 tcg_temp_free_i32(addr);
b0109805
PB
9921 tcg_gen_shli_i32(tmp, tmp, 1);
9922 tcg_gen_addi_i32(tmp, tmp, s->pc);
9923 store_reg(s, 15, tmp);
9ee6e8bb 9924 } else {
2359bf80 9925 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9926 op = (insn >> 4) & 0x3;
2359bf80
MR
9927 switch (op2) {
9928 case 0:
426f5abc 9929 goto illegal_op;
2359bf80
MR
9930 case 1:
9931 /* Load/store exclusive byte/halfword/doubleword */
9932 if (op == 2) {
9933 goto illegal_op;
9934 }
9935 ARCH(7);
9936 break;
9937 case 2:
9938 /* Load-acquire/store-release */
9939 if (op == 3) {
9940 goto illegal_op;
9941 }
9942 /* Fall through */
9943 case 3:
9944 /* Load-acquire/store-release exclusive */
9945 ARCH(8);
9946 break;
426f5abc 9947 }
39d5492a 9948 addr = tcg_temp_local_new_i32();
98a46317 9949 load_reg_var(s, addr, rn);
2359bf80
MR
9950 if (!(op2 & 1)) {
9951 if (insn & (1 << 20)) {
9952 tmp = tcg_temp_new_i32();
9953 switch (op) {
9954 case 0: /* ldab */
9bb6558a
PM
9955 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9956 rs | ISSIsAcqRel);
2359bf80
MR
9957 break;
9958 case 1: /* ldah */
9bb6558a
PM
9959 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9960 rs | ISSIsAcqRel);
2359bf80
MR
9961 break;
9962 case 2: /* lda */
9bb6558a
PM
9963 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9964 rs | ISSIsAcqRel);
2359bf80
MR
9965 break;
9966 default:
9967 abort();
9968 }
9969 store_reg(s, rs, tmp);
9970 } else {
9971 tmp = load_reg(s, rs);
9972 switch (op) {
9973 case 0: /* stlb */
9bb6558a
PM
9974 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9975 rs | ISSIsAcqRel);
2359bf80
MR
9976 break;
9977 case 1: /* stlh */
9bb6558a
PM
9978 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9979 rs | ISSIsAcqRel);
2359bf80
MR
9980 break;
9981 case 2: /* stl */
9bb6558a
PM
9982 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9983 rs | ISSIsAcqRel);
2359bf80
MR
9984 break;
9985 default:
9986 abort();
9987 }
9988 tcg_temp_free_i32(tmp);
9989 }
9990 } else if (insn & (1 << 20)) {
426f5abc 9991 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9992 } else {
426f5abc 9993 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9994 }
39d5492a 9995 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9996 }
9997 } else {
9998 /* Load/store multiple, RFE, SRS. */
9999 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10000 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10001 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10002 goto illegal_op;
00115976 10003 }
9ee6e8bb
PB
10004 if (insn & (1 << 20)) {
10005 /* rfe */
b0109805
PB
10006 addr = load_reg(s, rn);
10007 if ((insn & (1 << 24)) == 0)
10008 tcg_gen_addi_i32(addr, addr, -8);
10009 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10010 tmp = tcg_temp_new_i32();
12dcc321 10011 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10012 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10013 tmp2 = tcg_temp_new_i32();
12dcc321 10014 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10015 if (insn & (1 << 21)) {
10016 /* Base writeback. */
b0109805
PB
10017 if (insn & (1 << 24)) {
10018 tcg_gen_addi_i32(addr, addr, 4);
10019 } else {
10020 tcg_gen_addi_i32(addr, addr, -4);
10021 }
10022 store_reg(s, rn, addr);
10023 } else {
7d1b0095 10024 tcg_temp_free_i32(addr);
9ee6e8bb 10025 }
b0109805 10026 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10027 } else {
10028 /* srs */
81465888
PM
10029 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10030 insn & (1 << 21));
9ee6e8bb
PB
10031 }
10032 } else {
5856d44e 10033 int i, loaded_base = 0;
39d5492a 10034 TCGv_i32 loaded_var;
9ee6e8bb 10035 /* Load/store multiple. */
b0109805 10036 addr = load_reg(s, rn);
9ee6e8bb
PB
10037 offset = 0;
10038 for (i = 0; i < 16; i++) {
10039 if (insn & (1 << i))
10040 offset += 4;
10041 }
10042 if (insn & (1 << 24)) {
b0109805 10043 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10044 }
10045
39d5492a 10046 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
10047 for (i = 0; i < 16; i++) {
10048 if ((insn & (1 << i)) == 0)
10049 continue;
10050 if (insn & (1 << 20)) {
10051 /* Load. */
e2592fad 10052 tmp = tcg_temp_new_i32();
12dcc321 10053 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10054 if (i == 15) {
3bb8a96f 10055 gen_bx_excret(s, tmp);
5856d44e
YO
10056 } else if (i == rn) {
10057 loaded_var = tmp;
10058 loaded_base = 1;
9ee6e8bb 10059 } else {
b0109805 10060 store_reg(s, i, tmp);
9ee6e8bb
PB
10061 }
10062 } else {
10063 /* Store. */
b0109805 10064 tmp = load_reg(s, i);
12dcc321 10065 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10066 tcg_temp_free_i32(tmp);
9ee6e8bb 10067 }
b0109805 10068 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10069 }
5856d44e
YO
10070 if (loaded_base) {
10071 store_reg(s, rn, loaded_var);
10072 }
9ee6e8bb
PB
10073 if (insn & (1 << 21)) {
10074 /* Base register writeback. */
10075 if (insn & (1 << 24)) {
b0109805 10076 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10077 }
10078 /* Fault if writeback register is in register list. */
10079 if (insn & (1 << rn))
10080 goto illegal_op;
b0109805
PB
10081 store_reg(s, rn, addr);
10082 } else {
7d1b0095 10083 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10084 }
10085 }
10086 }
10087 break;
2af9ab77
JB
10088 case 5:
10089
9ee6e8bb 10090 op = (insn >> 21) & 0xf;
2af9ab77 10091 if (op == 6) {
62b44f05
AR
10092 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10093 goto illegal_op;
10094 }
2af9ab77
JB
10095 /* Halfword pack. */
10096 tmp = load_reg(s, rn);
10097 tmp2 = load_reg(s, rm);
10098 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10099 if (insn & (1 << 5)) {
10100 /* pkhtb */
10101 if (shift == 0)
10102 shift = 31;
10103 tcg_gen_sari_i32(tmp2, tmp2, shift);
10104 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10105 tcg_gen_ext16u_i32(tmp2, tmp2);
10106 } else {
10107 /* pkhbt */
10108 if (shift)
10109 tcg_gen_shli_i32(tmp2, tmp2, shift);
10110 tcg_gen_ext16u_i32(tmp, tmp);
10111 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10112 }
10113 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10114 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10115 store_reg(s, rd, tmp);
10116 } else {
2af9ab77
JB
10117 /* Data processing register constant shift. */
10118 if (rn == 15) {
7d1b0095 10119 tmp = tcg_temp_new_i32();
2af9ab77
JB
10120 tcg_gen_movi_i32(tmp, 0);
10121 } else {
10122 tmp = load_reg(s, rn);
10123 }
10124 tmp2 = load_reg(s, rm);
10125
10126 shiftop = (insn >> 4) & 3;
10127 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10128 conds = (insn & (1 << 20)) != 0;
10129 logic_cc = (conds && thumb2_logic_op(op));
10130 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10131 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10132 goto illegal_op;
7d1b0095 10133 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10134 if (rd != 15) {
10135 store_reg(s, rd, tmp);
10136 } else {
7d1b0095 10137 tcg_temp_free_i32(tmp);
2af9ab77 10138 }
3174f8e9 10139 }
9ee6e8bb
PB
10140 break;
10141 case 13: /* Misc data processing. */
10142 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10143 if (op < 4 && (insn & 0xf000) != 0xf000)
10144 goto illegal_op;
10145 switch (op) {
10146 case 0: /* Register controlled shift. */
8984bd2e
PB
10147 tmp = load_reg(s, rn);
10148 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10149 if ((insn & 0x70) != 0)
10150 goto illegal_op;
10151 op = (insn >> 21) & 3;
8984bd2e
PB
10152 logic_cc = (insn & (1 << 20)) != 0;
10153 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10154 if (logic_cc)
10155 gen_logic_CC(tmp);
bedb8a6b 10156 store_reg(s, rd, tmp);
9ee6e8bb
PB
10157 break;
10158 case 1: /* Sign/zero extend. */
62b44f05
AR
10159 op = (insn >> 20) & 7;
10160 switch (op) {
10161 case 0: /* SXTAH, SXTH */
10162 case 1: /* UXTAH, UXTH */
10163 case 4: /* SXTAB, SXTB */
10164 case 5: /* UXTAB, UXTB */
10165 break;
10166 case 2: /* SXTAB16, SXTB16 */
10167 case 3: /* UXTAB16, UXTB16 */
10168 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10169 goto illegal_op;
10170 }
10171 break;
10172 default:
10173 goto illegal_op;
10174 }
10175 if (rn != 15) {
10176 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10177 goto illegal_op;
10178 }
10179 }
5e3f878a 10180 tmp = load_reg(s, rm);
9ee6e8bb 10181 shift = (insn >> 4) & 3;
1301f322 10182 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10183 rotate, a shift is sufficient. */
10184 if (shift != 0)
f669df27 10185 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10186 op = (insn >> 20) & 7;
10187 switch (op) {
5e3f878a
PB
10188 case 0: gen_sxth(tmp); break;
10189 case 1: gen_uxth(tmp); break;
10190 case 2: gen_sxtb16(tmp); break;
10191 case 3: gen_uxtb16(tmp); break;
10192 case 4: gen_sxtb(tmp); break;
10193 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10194 default:
10195 g_assert_not_reached();
9ee6e8bb
PB
10196 }
10197 if (rn != 15) {
5e3f878a 10198 tmp2 = load_reg(s, rn);
9ee6e8bb 10199 if ((op >> 1) == 1) {
5e3f878a 10200 gen_add16(tmp, tmp2);
9ee6e8bb 10201 } else {
5e3f878a 10202 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10203 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10204 }
10205 }
5e3f878a 10206 store_reg(s, rd, tmp);
9ee6e8bb
PB
10207 break;
10208 case 2: /* SIMD add/subtract. */
62b44f05
AR
10209 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10210 goto illegal_op;
10211 }
9ee6e8bb
PB
10212 op = (insn >> 20) & 7;
10213 shift = (insn >> 4) & 7;
10214 if ((op & 3) == 3 || (shift & 3) == 3)
10215 goto illegal_op;
6ddbc6e4
PB
10216 tmp = load_reg(s, rn);
10217 tmp2 = load_reg(s, rm);
10218 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10219 tcg_temp_free_i32(tmp2);
6ddbc6e4 10220 store_reg(s, rd, tmp);
9ee6e8bb
PB
10221 break;
10222 case 3: /* Other data processing. */
10223 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10224 if (op < 4) {
10225 /* Saturating add/subtract. */
62b44f05
AR
10226 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10227 goto illegal_op;
10228 }
d9ba4830
PB
10229 tmp = load_reg(s, rn);
10230 tmp2 = load_reg(s, rm);
9ee6e8bb 10231 if (op & 1)
9ef39277 10232 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10233 if (op & 2)
9ef39277 10234 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10235 else
9ef39277 10236 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10237 tcg_temp_free_i32(tmp2);
9ee6e8bb 10238 } else {
62b44f05
AR
10239 switch (op) {
10240 case 0x0a: /* rbit */
10241 case 0x08: /* rev */
10242 case 0x09: /* rev16 */
10243 case 0x0b: /* revsh */
10244 case 0x18: /* clz */
10245 break;
10246 case 0x10: /* sel */
10247 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10248 goto illegal_op;
10249 }
10250 break;
10251 case 0x20: /* crc32/crc32c */
10252 case 0x21:
10253 case 0x22:
10254 case 0x28:
10255 case 0x29:
10256 case 0x2a:
10257 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10258 goto illegal_op;
10259 }
10260 break;
10261 default:
10262 goto illegal_op;
10263 }
d9ba4830 10264 tmp = load_reg(s, rn);
9ee6e8bb
PB
10265 switch (op) {
10266 case 0x0a: /* rbit */
d9ba4830 10267 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10268 break;
10269 case 0x08: /* rev */
66896cb8 10270 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10271 break;
10272 case 0x09: /* rev16 */
d9ba4830 10273 gen_rev16(tmp);
9ee6e8bb
PB
10274 break;
10275 case 0x0b: /* revsh */
d9ba4830 10276 gen_revsh(tmp);
9ee6e8bb
PB
10277 break;
10278 case 0x10: /* sel */
d9ba4830 10279 tmp2 = load_reg(s, rm);
7d1b0095 10280 tmp3 = tcg_temp_new_i32();
0ecb72a5 10281 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10282 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10283 tcg_temp_free_i32(tmp3);
10284 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10285 break;
10286 case 0x18: /* clz */
7539a012 10287 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10288 break;
eb0ecd5a
WN
10289 case 0x20:
10290 case 0x21:
10291 case 0x22:
10292 case 0x28:
10293 case 0x29:
10294 case 0x2a:
10295 {
10296 /* crc32/crc32c */
10297 uint32_t sz = op & 0x3;
10298 uint32_t c = op & 0x8;
10299
eb0ecd5a 10300 tmp2 = load_reg(s, rm);
aa633469
PM
10301 if (sz == 0) {
10302 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10303 } else if (sz == 1) {
10304 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10305 }
eb0ecd5a
WN
10306 tmp3 = tcg_const_i32(1 << sz);
10307 if (c) {
10308 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10309 } else {
10310 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10311 }
10312 tcg_temp_free_i32(tmp2);
10313 tcg_temp_free_i32(tmp3);
10314 break;
10315 }
9ee6e8bb 10316 default:
62b44f05 10317 g_assert_not_reached();
9ee6e8bb
PB
10318 }
10319 }
d9ba4830 10320 store_reg(s, rd, tmp);
9ee6e8bb
PB
10321 break;
10322 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10323 switch ((insn >> 20) & 7) {
10324 case 0: /* 32 x 32 -> 32 */
10325 case 7: /* Unsigned sum of absolute differences. */
10326 break;
10327 case 1: /* 16 x 16 -> 32 */
10328 case 2: /* Dual multiply add. */
10329 case 3: /* 32 * 16 -> 32msb */
10330 case 4: /* Dual multiply subtract. */
10331 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10332 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10333 goto illegal_op;
10334 }
10335 break;
10336 }
9ee6e8bb 10337 op = (insn >> 4) & 0xf;
d9ba4830
PB
10338 tmp = load_reg(s, rn);
10339 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10340 switch ((insn >> 20) & 7) {
10341 case 0: /* 32 x 32 -> 32 */
d9ba4830 10342 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10343 tcg_temp_free_i32(tmp2);
9ee6e8bb 10344 if (rs != 15) {
d9ba4830 10345 tmp2 = load_reg(s, rs);
9ee6e8bb 10346 if (op)
d9ba4830 10347 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10348 else
d9ba4830 10349 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10350 tcg_temp_free_i32(tmp2);
9ee6e8bb 10351 }
9ee6e8bb
PB
10352 break;
10353 case 1: /* 16 x 16 -> 32 */
d9ba4830 10354 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10355 tcg_temp_free_i32(tmp2);
9ee6e8bb 10356 if (rs != 15) {
d9ba4830 10357 tmp2 = load_reg(s, rs);
9ef39277 10358 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10359 tcg_temp_free_i32(tmp2);
9ee6e8bb 10360 }
9ee6e8bb
PB
10361 break;
10362 case 2: /* Dual multiply add. */
10363 case 4: /* Dual multiply subtract. */
10364 if (op)
d9ba4830
PB
10365 gen_swap_half(tmp2);
10366 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10367 if (insn & (1 << 22)) {
e1d177b9 10368 /* This subtraction cannot overflow. */
d9ba4830 10369 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10370 } else {
e1d177b9
PM
10371 /* This addition cannot overflow 32 bits;
10372 * however it may overflow considered as a signed
10373 * operation, in which case we must set the Q flag.
10374 */
9ef39277 10375 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10376 }
7d1b0095 10377 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10378 if (rs != 15)
10379 {
d9ba4830 10380 tmp2 = load_reg(s, rs);
9ef39277 10381 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10382 tcg_temp_free_i32(tmp2);
9ee6e8bb 10383 }
9ee6e8bb
PB
10384 break;
10385 case 3: /* 32 * 16 -> 32msb */
10386 if (op)
d9ba4830 10387 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10388 else
d9ba4830 10389 gen_sxth(tmp2);
a7812ae4
PB
10390 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10391 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10392 tmp = tcg_temp_new_i32();
ecc7b3aa 10393 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10394 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10395 if (rs != 15)
10396 {
d9ba4830 10397 tmp2 = load_reg(s, rs);
9ef39277 10398 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10399 tcg_temp_free_i32(tmp2);
9ee6e8bb 10400 }
9ee6e8bb 10401 break;
838fa72d
AJ
10402 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10403 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10404 if (rs != 15) {
838fa72d
AJ
10405 tmp = load_reg(s, rs);
10406 if (insn & (1 << 20)) {
10407 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10408 } else {
838fa72d 10409 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10410 }
2c0262af 10411 }
838fa72d
AJ
10412 if (insn & (1 << 4)) {
10413 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10414 }
10415 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10416 tmp = tcg_temp_new_i32();
ecc7b3aa 10417 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10418 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10419 break;
10420 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10421 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10422 tcg_temp_free_i32(tmp2);
9ee6e8bb 10423 if (rs != 15) {
d9ba4830
PB
10424 tmp2 = load_reg(s, rs);
10425 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10426 tcg_temp_free_i32(tmp2);
5fd46862 10427 }
9ee6e8bb 10428 break;
2c0262af 10429 }
d9ba4830 10430 store_reg(s, rd, tmp);
2c0262af 10431 break;
9ee6e8bb
PB
10432 case 6: case 7: /* 64-bit multiply, Divide. */
10433 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10434 tmp = load_reg(s, rn);
10435 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10436 if ((op & 0x50) == 0x10) {
10437 /* sdiv, udiv */
d614a513 10438 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10439 goto illegal_op;
47789990 10440 }
9ee6e8bb 10441 if (op & 0x20)
5e3f878a 10442 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10443 else
5e3f878a 10444 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10445 tcg_temp_free_i32(tmp2);
5e3f878a 10446 store_reg(s, rd, tmp);
9ee6e8bb
PB
10447 } else if ((op & 0xe) == 0xc) {
10448 /* Dual multiply accumulate long. */
62b44f05
AR
10449 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10450 tcg_temp_free_i32(tmp);
10451 tcg_temp_free_i32(tmp2);
10452 goto illegal_op;
10453 }
9ee6e8bb 10454 if (op & 1)
5e3f878a
PB
10455 gen_swap_half(tmp2);
10456 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10457 if (op & 0x10) {
5e3f878a 10458 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10459 } else {
5e3f878a 10460 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10461 }
7d1b0095 10462 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10463 /* BUGFIX */
10464 tmp64 = tcg_temp_new_i64();
10465 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10466 tcg_temp_free_i32(tmp);
a7812ae4
PB
10467 gen_addq(s, tmp64, rs, rd);
10468 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10469 tcg_temp_free_i64(tmp64);
2c0262af 10470 } else {
9ee6e8bb
PB
10471 if (op & 0x20) {
10472 /* Unsigned 64-bit multiply */
a7812ae4 10473 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10474 } else {
9ee6e8bb
PB
10475 if (op & 8) {
10476 /* smlalxy */
62b44f05
AR
10477 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10478 tcg_temp_free_i32(tmp2);
10479 tcg_temp_free_i32(tmp);
10480 goto illegal_op;
10481 }
5e3f878a 10482 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10483 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10484 tmp64 = tcg_temp_new_i64();
10485 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10486 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10487 } else {
10488 /* Signed 64-bit multiply */
a7812ae4 10489 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10490 }
b5ff1b31 10491 }
9ee6e8bb
PB
10492 if (op & 4) {
10493 /* umaal */
62b44f05
AR
10494 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10495 tcg_temp_free_i64(tmp64);
10496 goto illegal_op;
10497 }
a7812ae4
PB
10498 gen_addq_lo(s, tmp64, rs);
10499 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10500 } else if (op & 0x40) {
10501 /* 64-bit accumulate. */
a7812ae4 10502 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10503 }
a7812ae4 10504 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10505 tcg_temp_free_i64(tmp64);
5fd46862 10506 }
2c0262af 10507 break;
9ee6e8bb
PB
10508 }
10509 break;
10510 case 6: case 7: case 14: case 15:
10511 /* Coprocessor. */
7517748e
PM
10512 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10513 /* We don't currently implement M profile FP support,
10514 * so this entire space should give a NOCP fault.
10515 */
10516 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10517 default_exception_el(s));
10518 break;
10519 }
9ee6e8bb
PB
10520 if (((insn >> 24) & 3) == 3) {
10521 /* Translate into the equivalent ARM encoding. */
f06053e3 10522 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10523 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10524 goto illegal_op;
7dcc1f89 10525 }
6a57f3eb 10526 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10527 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10528 goto illegal_op;
10529 }
9ee6e8bb
PB
10530 } else {
10531 if (insn & (1 << 28))
10532 goto illegal_op;
7dcc1f89 10533 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10534 goto illegal_op;
7dcc1f89 10535 }
9ee6e8bb
PB
10536 }
10537 break;
10538 case 8: case 9: case 10: case 11:
10539 if (insn & (1 << 15)) {
10540 /* Branches, misc control. */
10541 if (insn & 0x5000) {
10542 /* Unconditional branch. */
10543 /* signextend(hw1[10:0]) -> offset[:12]. */
10544 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10545 /* hw1[10:0] -> offset[11:1]. */
10546 offset |= (insn & 0x7ff) << 1;
10547 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10548 offset[24:22] already have the same value because of the
10549 sign extension above. */
10550 offset ^= ((~insn) & (1 << 13)) << 10;
10551 offset ^= ((~insn) & (1 << 11)) << 11;
10552
9ee6e8bb
PB
10553 if (insn & (1 << 14)) {
10554 /* Branch and link. */
3174f8e9 10555 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10556 }
3b46e624 10557
b0109805 10558 offset += s->pc;
9ee6e8bb
PB
10559 if (insn & (1 << 12)) {
10560 /* b/bl */
b0109805 10561 gen_jmp(s, offset);
9ee6e8bb
PB
10562 } else {
10563 /* blx */
b0109805 10564 offset &= ~(uint32_t)2;
be5e7a76 10565 /* thumb2 bx, no need to check */
b0109805 10566 gen_bx_im(s, offset);
2c0262af 10567 }
9ee6e8bb
PB
10568 } else if (((insn >> 23) & 7) == 7) {
10569 /* Misc control */
10570 if (insn & (1 << 13))
10571 goto illegal_op;
10572
10573 if (insn & (1 << 26)) {
001b3cab
PM
10574 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10575 goto illegal_op;
10576 }
37e6456e
PM
10577 if (!(insn & (1 << 20))) {
10578 /* Hypervisor call (v7) */
10579 int imm16 = extract32(insn, 16, 4) << 12
10580 | extract32(insn, 0, 12);
10581 ARCH(7);
10582 if (IS_USER(s)) {
10583 goto illegal_op;
10584 }
10585 gen_hvc(s, imm16);
10586 } else {
10587 /* Secure monitor call (v6+) */
10588 ARCH(6K);
10589 if (IS_USER(s)) {
10590 goto illegal_op;
10591 }
10592 gen_smc(s);
10593 }
2c0262af 10594 } else {
9ee6e8bb
PB
10595 op = (insn >> 20) & 7;
10596 switch (op) {
10597 case 0: /* msr cpsr. */
b53d8923 10598 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10599 tmp = load_reg(s, rn);
b28b3377
PM
10600 /* the constant is the mask and SYSm fields */
10601 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10602 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10603 tcg_temp_free_i32(addr);
7d1b0095 10604 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10605 gen_lookup_tb(s);
10606 break;
10607 }
10608 /* fall through */
10609 case 1: /* msr spsr. */
b53d8923 10610 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10611 goto illegal_op;
b53d8923 10612 }
8bfd0550
PM
10613
10614 if (extract32(insn, 5, 1)) {
10615 /* MSR (banked) */
10616 int sysm = extract32(insn, 8, 4) |
10617 (extract32(insn, 4, 1) << 4);
10618 int r = op & 1;
10619
10620 gen_msr_banked(s, r, sysm, rm);
10621 break;
10622 }
10623
10624 /* MSR (for PSRs) */
2fbac54b
FN
10625 tmp = load_reg(s, rn);
10626 if (gen_set_psr(s,
7dcc1f89 10627 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10628 op == 1, tmp))
9ee6e8bb
PB
10629 goto illegal_op;
10630 break;
10631 case 2: /* cps, nop-hint. */
10632 if (((insn >> 8) & 7) == 0) {
10633 gen_nop_hint(s, insn & 0xff);
10634 }
10635 /* Implemented as NOP in user mode. */
10636 if (IS_USER(s))
10637 break;
10638 offset = 0;
10639 imm = 0;
10640 if (insn & (1 << 10)) {
10641 if (insn & (1 << 7))
10642 offset |= CPSR_A;
10643 if (insn & (1 << 6))
10644 offset |= CPSR_I;
10645 if (insn & (1 << 5))
10646 offset |= CPSR_F;
10647 if (insn & (1 << 9))
10648 imm = CPSR_A | CPSR_I | CPSR_F;
10649 }
10650 if (insn & (1 << 8)) {
10651 offset |= 0x1f;
10652 imm |= (insn & 0x1f);
10653 }
10654 if (offset) {
2fbac54b 10655 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10656 }
10657 break;
10658 case 3: /* Special control operations. */
426f5abc 10659 ARCH(7);
9ee6e8bb
PB
10660 op = (insn >> 4) & 0xf;
10661 switch (op) {
10662 case 2: /* clrex */
426f5abc 10663 gen_clrex(s);
9ee6e8bb
PB
10664 break;
10665 case 4: /* dsb */
10666 case 5: /* dmb */
61e4c432 10667 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10668 break;
6df99dec
SS
10669 case 6: /* isb */
10670 /* We need to break the TB after this insn
10671 * to execute self-modifying code correctly
10672 * and also to take any pending interrupts
10673 * immediately.
10674 */
0b609cc1 10675 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10676 break;
9ee6e8bb
PB
10677 default:
10678 goto illegal_op;
10679 }
10680 break;
10681 case 4: /* bxj */
9d7c59c8
PM
10682 /* Trivial implementation equivalent to bx.
10683 * This instruction doesn't exist at all for M-profile.
10684 */
10685 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10686 goto illegal_op;
10687 }
d9ba4830
PB
10688 tmp = load_reg(s, rn);
10689 gen_bx(s, tmp);
9ee6e8bb
PB
10690 break;
10691 case 5: /* Exception return. */
b8b45b68
RV
10692 if (IS_USER(s)) {
10693 goto illegal_op;
10694 }
10695 if (rn != 14 || rd != 15) {
10696 goto illegal_op;
10697 }
10698 tmp = load_reg(s, rn);
10699 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10700 gen_exception_return(s, tmp);
10701 break;
8bfd0550 10702 case 6: /* MRS */
43ac6574
PM
10703 if (extract32(insn, 5, 1) &&
10704 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10705 /* MRS (banked) */
10706 int sysm = extract32(insn, 16, 4) |
10707 (extract32(insn, 4, 1) << 4);
10708
10709 gen_mrs_banked(s, 0, sysm, rd);
10710 break;
10711 }
10712
3d54026f
PM
10713 if (extract32(insn, 16, 4) != 0xf) {
10714 goto illegal_op;
10715 }
10716 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10717 extract32(insn, 0, 8) != 0) {
10718 goto illegal_op;
10719 }
10720
8bfd0550 10721 /* mrs cpsr */
7d1b0095 10722 tmp = tcg_temp_new_i32();
b53d8923 10723 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10724 addr = tcg_const_i32(insn & 0xff);
10725 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10726 tcg_temp_free_i32(addr);
9ee6e8bb 10727 } else {
9ef39277 10728 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10729 }
8984bd2e 10730 store_reg(s, rd, tmp);
9ee6e8bb 10731 break;
8bfd0550 10732 case 7: /* MRS */
43ac6574
PM
10733 if (extract32(insn, 5, 1) &&
10734 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10735 /* MRS (banked) */
10736 int sysm = extract32(insn, 16, 4) |
10737 (extract32(insn, 4, 1) << 4);
10738
10739 gen_mrs_banked(s, 1, sysm, rd);
10740 break;
10741 }
10742
10743 /* mrs spsr. */
9ee6e8bb 10744 /* Not accessible in user mode. */
b53d8923 10745 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10746 goto illegal_op;
b53d8923 10747 }
3d54026f
PM
10748
10749 if (extract32(insn, 16, 4) != 0xf ||
10750 extract32(insn, 0, 8) != 0) {
10751 goto illegal_op;
10752 }
10753
d9ba4830
PB
10754 tmp = load_cpu_field(spsr);
10755 store_reg(s, rd, tmp);
9ee6e8bb 10756 break;
2c0262af
FB
10757 }
10758 }
9ee6e8bb
PB
10759 } else {
10760 /* Conditional branch. */
10761 op = (insn >> 22) & 0xf;
10762 /* Generate a conditional jump to next instruction. */
10763 s->condlabel = gen_new_label();
39fb730a 10764 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10765 s->condjmp = 1;
10766
10767 /* offset[11:1] = insn[10:0] */
10768 offset = (insn & 0x7ff) << 1;
10769 /* offset[17:12] = insn[21:16]. */
10770 offset |= (insn & 0x003f0000) >> 4;
10771 /* offset[31:20] = insn[26]. */
10772 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10773 /* offset[18] = insn[13]. */
10774 offset |= (insn & (1 << 13)) << 5;
10775 /* offset[19] = insn[11]. */
10776 offset |= (insn & (1 << 11)) << 8;
10777
10778 /* jump to the offset */
b0109805 10779 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10780 }
10781 } else {
10782 /* Data processing immediate. */
10783 if (insn & (1 << 25)) {
10784 if (insn & (1 << 24)) {
10785 if (insn & (1 << 20))
10786 goto illegal_op;
10787 /* Bitfield/Saturate. */
10788 op = (insn >> 21) & 7;
10789 imm = insn & 0x1f;
10790 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10791 if (rn == 15) {
7d1b0095 10792 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10793 tcg_gen_movi_i32(tmp, 0);
10794 } else {
10795 tmp = load_reg(s, rn);
10796 }
9ee6e8bb
PB
10797 switch (op) {
10798 case 2: /* Signed bitfield extract. */
10799 imm++;
10800 if (shift + imm > 32)
10801 goto illegal_op;
59a71b4c
RH
10802 if (imm < 32) {
10803 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10804 }
9ee6e8bb
PB
10805 break;
10806 case 6: /* Unsigned bitfield extract. */
10807 imm++;
10808 if (shift + imm > 32)
10809 goto illegal_op;
59a71b4c
RH
10810 if (imm < 32) {
10811 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10812 }
9ee6e8bb
PB
10813 break;
10814 case 3: /* Bitfield insert/clear. */
10815 if (imm < shift)
10816 goto illegal_op;
10817 imm = imm + 1 - shift;
10818 if (imm != 32) {
6ddbc6e4 10819 tmp2 = load_reg(s, rd);
d593c48e 10820 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10821 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10822 }
10823 break;
10824 case 7:
10825 goto illegal_op;
10826 default: /* Saturate. */
9ee6e8bb
PB
10827 if (shift) {
10828 if (op & 1)
6ddbc6e4 10829 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10830 else
6ddbc6e4 10831 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10832 }
6ddbc6e4 10833 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10834 if (op & 4) {
10835 /* Unsigned. */
62b44f05
AR
10836 if ((op & 1) && shift == 0) {
10837 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10838 tcg_temp_free_i32(tmp);
10839 tcg_temp_free_i32(tmp2);
10840 goto illegal_op;
10841 }
9ef39277 10842 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10843 } else {
9ef39277 10844 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10845 }
2c0262af 10846 } else {
9ee6e8bb 10847 /* Signed. */
62b44f05
AR
10848 if ((op & 1) && shift == 0) {
10849 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10850 tcg_temp_free_i32(tmp);
10851 tcg_temp_free_i32(tmp2);
10852 goto illegal_op;
10853 }
9ef39277 10854 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10855 } else {
9ef39277 10856 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10857 }
2c0262af 10858 }
b75263d6 10859 tcg_temp_free_i32(tmp2);
9ee6e8bb 10860 break;
2c0262af 10861 }
6ddbc6e4 10862 store_reg(s, rd, tmp);
9ee6e8bb
PB
10863 } else {
10864 imm = ((insn & 0x04000000) >> 15)
10865 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10866 if (insn & (1 << 22)) {
10867 /* 16-bit immediate. */
10868 imm |= (insn >> 4) & 0xf000;
10869 if (insn & (1 << 23)) {
10870 /* movt */
5e3f878a 10871 tmp = load_reg(s, rd);
86831435 10872 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10873 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10874 } else {
9ee6e8bb 10875 /* movw */
7d1b0095 10876 tmp = tcg_temp_new_i32();
5e3f878a 10877 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10878 }
10879 } else {
9ee6e8bb
PB
10880 /* Add/sub 12-bit immediate. */
10881 if (rn == 15) {
b0109805 10882 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10883 if (insn & (1 << 23))
b0109805 10884 offset -= imm;
9ee6e8bb 10885 else
b0109805 10886 offset += imm;
7d1b0095 10887 tmp = tcg_temp_new_i32();
5e3f878a 10888 tcg_gen_movi_i32(tmp, offset);
2c0262af 10889 } else {
5e3f878a 10890 tmp = load_reg(s, rn);
9ee6e8bb 10891 if (insn & (1 << 23))
5e3f878a 10892 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10893 else
5e3f878a 10894 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10895 }
9ee6e8bb 10896 }
5e3f878a 10897 store_reg(s, rd, tmp);
191abaa2 10898 }
9ee6e8bb
PB
10899 } else {
10900 int shifter_out = 0;
10901 /* modified 12-bit immediate. */
10902 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10903 imm = (insn & 0xff);
10904 switch (shift) {
10905 case 0: /* XY */
10906 /* Nothing to do. */
10907 break;
10908 case 1: /* 00XY00XY */
10909 imm |= imm << 16;
10910 break;
10911 case 2: /* XY00XY00 */
10912 imm |= imm << 16;
10913 imm <<= 8;
10914 break;
10915 case 3: /* XYXYXYXY */
10916 imm |= imm << 16;
10917 imm |= imm << 8;
10918 break;
10919 default: /* Rotated constant. */
10920 shift = (shift << 1) | (imm >> 7);
10921 imm |= 0x80;
10922 imm = imm << (32 - shift);
10923 shifter_out = 1;
10924 break;
b5ff1b31 10925 }
7d1b0095 10926 tmp2 = tcg_temp_new_i32();
3174f8e9 10927 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10928 rn = (insn >> 16) & 0xf;
3174f8e9 10929 if (rn == 15) {
7d1b0095 10930 tmp = tcg_temp_new_i32();
3174f8e9
FN
10931 tcg_gen_movi_i32(tmp, 0);
10932 } else {
10933 tmp = load_reg(s, rn);
10934 }
9ee6e8bb
PB
10935 op = (insn >> 21) & 0xf;
10936 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10937 shifter_out, tmp, tmp2))
9ee6e8bb 10938 goto illegal_op;
7d1b0095 10939 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10940 rd = (insn >> 8) & 0xf;
10941 if (rd != 15) {
3174f8e9
FN
10942 store_reg(s, rd, tmp);
10943 } else {
7d1b0095 10944 tcg_temp_free_i32(tmp);
2c0262af 10945 }
2c0262af 10946 }
9ee6e8bb
PB
10947 }
10948 break;
10949 case 12: /* Load/store single data item. */
10950 {
10951 int postinc = 0;
10952 int writeback = 0;
a99caa48 10953 int memidx;
9bb6558a
PM
10954 ISSInfo issinfo;
10955
9ee6e8bb 10956 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10957 if (disas_neon_ls_insn(s, insn)) {
c1713132 10958 goto illegal_op;
7dcc1f89 10959 }
9ee6e8bb
PB
10960 break;
10961 }
a2fdc890
PM
10962 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10963 if (rs == 15) {
10964 if (!(insn & (1 << 20))) {
10965 goto illegal_op;
10966 }
10967 if (op != 2) {
10968 /* Byte or halfword load space with dest == r15 : memory hints.
10969 * Catch them early so we don't emit pointless addressing code.
10970 * This space is a mix of:
10971 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10972 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10973 * cores)
10974 * unallocated hints, which must be treated as NOPs
10975 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10976 * which is easiest for the decoding logic
10977 * Some space which must UNDEF
10978 */
10979 int op1 = (insn >> 23) & 3;
10980 int op2 = (insn >> 6) & 0x3f;
10981 if (op & 2) {
10982 goto illegal_op;
10983 }
10984 if (rn == 15) {
02afbf64
PM
10985 /* UNPREDICTABLE, unallocated hint or
10986 * PLD/PLDW/PLI (literal)
10987 */
a2fdc890
PM
10988 return 0;
10989 }
10990 if (op1 & 1) {
02afbf64 10991 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10992 }
10993 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10994 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10995 }
10996 /* UNDEF space, or an UNPREDICTABLE */
10997 return 1;
10998 }
10999 }
a99caa48 11000 memidx = get_mem_index(s);
9ee6e8bb 11001 if (rn == 15) {
7d1b0095 11002 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11003 /* PC relative. */
11004 /* s->pc has already been incremented by 4. */
11005 imm = s->pc & 0xfffffffc;
11006 if (insn & (1 << 23))
11007 imm += insn & 0xfff;
11008 else
11009 imm -= insn & 0xfff;
b0109805 11010 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11011 } else {
b0109805 11012 addr = load_reg(s, rn);
9ee6e8bb
PB
11013 if (insn & (1 << 23)) {
11014 /* Positive offset. */
11015 imm = insn & 0xfff;
b0109805 11016 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11017 } else {
9ee6e8bb 11018 imm = insn & 0xff;
2a0308c5
PM
11019 switch ((insn >> 8) & 0xf) {
11020 case 0x0: /* Shifted Register. */
9ee6e8bb 11021 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11022 if (shift > 3) {
11023 tcg_temp_free_i32(addr);
18c9b560 11024 goto illegal_op;
2a0308c5 11025 }
b26eefb6 11026 tmp = load_reg(s, rm);
9ee6e8bb 11027 if (shift)
b26eefb6 11028 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11029 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11030 tcg_temp_free_i32(tmp);
9ee6e8bb 11031 break;
2a0308c5 11032 case 0xc: /* Negative offset. */
b0109805 11033 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11034 break;
2a0308c5 11035 case 0xe: /* User privilege. */
b0109805 11036 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11037 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11038 break;
2a0308c5 11039 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11040 imm = -imm;
11041 /* Fall through. */
2a0308c5 11042 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11043 postinc = 1;
11044 writeback = 1;
11045 break;
2a0308c5 11046 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11047 imm = -imm;
11048 /* Fall through. */
2a0308c5 11049 case 0xf: /* Pre-increment. */
b0109805 11050 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11051 writeback = 1;
11052 break;
11053 default:
2a0308c5 11054 tcg_temp_free_i32(addr);
b7bcbe95 11055 goto illegal_op;
9ee6e8bb
PB
11056 }
11057 }
11058 }
9bb6558a
PM
11059
11060 issinfo = writeback ? ISSInvalid : rs;
11061
9ee6e8bb
PB
11062 if (insn & (1 << 20)) {
11063 /* Load. */
5a839c0d 11064 tmp = tcg_temp_new_i32();
a2fdc890 11065 switch (op) {
5a839c0d 11066 case 0:
9bb6558a 11067 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11068 break;
11069 case 4:
9bb6558a 11070 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11071 break;
11072 case 1:
9bb6558a 11073 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11074 break;
11075 case 5:
9bb6558a 11076 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11077 break;
11078 case 2:
9bb6558a 11079 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11080 break;
2a0308c5 11081 default:
5a839c0d 11082 tcg_temp_free_i32(tmp);
2a0308c5
PM
11083 tcg_temp_free_i32(addr);
11084 goto illegal_op;
a2fdc890
PM
11085 }
11086 if (rs == 15) {
3bb8a96f 11087 gen_bx_excret(s, tmp);
9ee6e8bb 11088 } else {
a2fdc890 11089 store_reg(s, rs, tmp);
9ee6e8bb
PB
11090 }
11091 } else {
11092 /* Store. */
b0109805 11093 tmp = load_reg(s, rs);
9ee6e8bb 11094 switch (op) {
5a839c0d 11095 case 0:
9bb6558a 11096 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11097 break;
11098 case 1:
9bb6558a 11099 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11100 break;
11101 case 2:
9bb6558a 11102 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11103 break;
2a0308c5 11104 default:
5a839c0d 11105 tcg_temp_free_i32(tmp);
2a0308c5
PM
11106 tcg_temp_free_i32(addr);
11107 goto illegal_op;
b7bcbe95 11108 }
5a839c0d 11109 tcg_temp_free_i32(tmp);
2c0262af 11110 }
9ee6e8bb 11111 if (postinc)
b0109805
PB
11112 tcg_gen_addi_i32(addr, addr, imm);
11113 if (writeback) {
11114 store_reg(s, rn, addr);
11115 } else {
7d1b0095 11116 tcg_temp_free_i32(addr);
b0109805 11117 }
9ee6e8bb
PB
11118 }
11119 break;
11120 default:
11121 goto illegal_op;
2c0262af 11122 }
9ee6e8bb
PB
11123 return 0;
11124illegal_op:
11125 return 1;
2c0262af
FB
11126}
11127
296e5a0a 11128static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11129{
296e5a0a 11130 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11131 int32_t offset;
11132 int i;
39d5492a
PM
11133 TCGv_i32 tmp;
11134 TCGv_i32 tmp2;
11135 TCGv_i32 addr;
99c475ab 11136
99c475ab
FB
11137 switch (insn >> 12) {
11138 case 0: case 1:
396e467c 11139
99c475ab
FB
11140 rd = insn & 7;
11141 op = (insn >> 11) & 3;
11142 if (op == 3) {
11143 /* add/subtract */
11144 rn = (insn >> 3) & 7;
396e467c 11145 tmp = load_reg(s, rn);
99c475ab
FB
11146 if (insn & (1 << 10)) {
11147 /* immediate */
7d1b0095 11148 tmp2 = tcg_temp_new_i32();
396e467c 11149 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11150 } else {
11151 /* reg */
11152 rm = (insn >> 6) & 7;
396e467c 11153 tmp2 = load_reg(s, rm);
99c475ab 11154 }
9ee6e8bb
PB
11155 if (insn & (1 << 9)) {
11156 if (s->condexec_mask)
396e467c 11157 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11158 else
72485ec4 11159 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11160 } else {
11161 if (s->condexec_mask)
396e467c 11162 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11163 else
72485ec4 11164 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11165 }
7d1b0095 11166 tcg_temp_free_i32(tmp2);
396e467c 11167 store_reg(s, rd, tmp);
99c475ab
FB
11168 } else {
11169 /* shift immediate */
11170 rm = (insn >> 3) & 7;
11171 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11172 tmp = load_reg(s, rm);
11173 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11174 if (!s->condexec_mask)
11175 gen_logic_CC(tmp);
11176 store_reg(s, rd, tmp);
99c475ab
FB
11177 }
11178 break;
11179 case 2: case 3:
11180 /* arithmetic large immediate */
11181 op = (insn >> 11) & 3;
11182 rd = (insn >> 8) & 0x7;
396e467c 11183 if (op == 0) { /* mov */
7d1b0095 11184 tmp = tcg_temp_new_i32();
396e467c 11185 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11186 if (!s->condexec_mask)
396e467c
FN
11187 gen_logic_CC(tmp);
11188 store_reg(s, rd, tmp);
11189 } else {
11190 tmp = load_reg(s, rd);
7d1b0095 11191 tmp2 = tcg_temp_new_i32();
396e467c
FN
11192 tcg_gen_movi_i32(tmp2, insn & 0xff);
11193 switch (op) {
11194 case 1: /* cmp */
72485ec4 11195 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11196 tcg_temp_free_i32(tmp);
11197 tcg_temp_free_i32(tmp2);
396e467c
FN
11198 break;
11199 case 2: /* add */
11200 if (s->condexec_mask)
11201 tcg_gen_add_i32(tmp, tmp, tmp2);
11202 else
72485ec4 11203 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11204 tcg_temp_free_i32(tmp2);
396e467c
FN
11205 store_reg(s, rd, tmp);
11206 break;
11207 case 3: /* sub */
11208 if (s->condexec_mask)
11209 tcg_gen_sub_i32(tmp, tmp, tmp2);
11210 else
72485ec4 11211 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11212 tcg_temp_free_i32(tmp2);
396e467c
FN
11213 store_reg(s, rd, tmp);
11214 break;
11215 }
99c475ab 11216 }
99c475ab
FB
11217 break;
11218 case 4:
11219 if (insn & (1 << 11)) {
11220 rd = (insn >> 8) & 7;
5899f386
FB
11221 /* load pc-relative. Bit 1 of PC is ignored. */
11222 val = s->pc + 2 + ((insn & 0xff) * 4);
11223 val &= ~(uint32_t)2;
7d1b0095 11224 addr = tcg_temp_new_i32();
b0109805 11225 tcg_gen_movi_i32(addr, val);
c40c8556 11226 tmp = tcg_temp_new_i32();
9bb6558a
PM
11227 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11228 rd | ISSIs16Bit);
7d1b0095 11229 tcg_temp_free_i32(addr);
b0109805 11230 store_reg(s, rd, tmp);
99c475ab
FB
11231 break;
11232 }
11233 if (insn & (1 << 10)) {
ebfe27c5
PM
11234 /* 0b0100_01xx_xxxx_xxxx
11235 * - data processing extended, branch and exchange
11236 */
99c475ab
FB
11237 rd = (insn & 7) | ((insn >> 4) & 8);
11238 rm = (insn >> 3) & 0xf;
11239 op = (insn >> 8) & 3;
11240 switch (op) {
11241 case 0: /* add */
396e467c
FN
11242 tmp = load_reg(s, rd);
11243 tmp2 = load_reg(s, rm);
11244 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11245 tcg_temp_free_i32(tmp2);
396e467c 11246 store_reg(s, rd, tmp);
99c475ab
FB
11247 break;
11248 case 1: /* cmp */
396e467c
FN
11249 tmp = load_reg(s, rd);
11250 tmp2 = load_reg(s, rm);
72485ec4 11251 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11252 tcg_temp_free_i32(tmp2);
11253 tcg_temp_free_i32(tmp);
99c475ab
FB
11254 break;
11255 case 2: /* mov/cpy */
396e467c
FN
11256 tmp = load_reg(s, rm);
11257 store_reg(s, rd, tmp);
99c475ab 11258 break;
ebfe27c5
PM
11259 case 3:
11260 {
11261 /* 0b0100_0111_xxxx_xxxx
11262 * - branch [and link] exchange thumb register
11263 */
11264 bool link = insn & (1 << 7);
11265
fb602cb7 11266 if (insn & 3) {
ebfe27c5
PM
11267 goto undef;
11268 }
11269 if (link) {
be5e7a76 11270 ARCH(5);
ebfe27c5 11271 }
fb602cb7
PM
11272 if ((insn & 4)) {
11273 /* BXNS/BLXNS: only exists for v8M with the
11274 * security extensions, and always UNDEF if NonSecure.
11275 * We don't implement these in the user-only mode
11276 * either (in theory you can use them from Secure User
11277 * mode but they are too tied in to system emulation.)
11278 */
11279 if (!s->v8m_secure || IS_USER_ONLY) {
11280 goto undef;
11281 }
11282 if (link) {
3e3fa230 11283 gen_blxns(s, rm);
fb602cb7
PM
11284 } else {
11285 gen_bxns(s, rm);
11286 }
11287 break;
11288 }
11289 /* BLX/BX */
ebfe27c5
PM
11290 tmp = load_reg(s, rm);
11291 if (link) {
99c475ab 11292 val = (uint32_t)s->pc | 1;
7d1b0095 11293 tmp2 = tcg_temp_new_i32();
b0109805
PB
11294 tcg_gen_movi_i32(tmp2, val);
11295 store_reg(s, 14, tmp2);
3bb8a96f
PM
11296 gen_bx(s, tmp);
11297 } else {
11298 /* Only BX works as exception-return, not BLX */
11299 gen_bx_excret(s, tmp);
99c475ab 11300 }
99c475ab
FB
11301 break;
11302 }
ebfe27c5 11303 }
99c475ab
FB
11304 break;
11305 }
11306
11307 /* data processing register */
11308 rd = insn & 7;
11309 rm = (insn >> 3) & 7;
11310 op = (insn >> 6) & 0xf;
11311 if (op == 2 || op == 3 || op == 4 || op == 7) {
11312 /* the shift/rotate ops want the operands backwards */
11313 val = rm;
11314 rm = rd;
11315 rd = val;
11316 val = 1;
11317 } else {
11318 val = 0;
11319 }
11320
396e467c 11321 if (op == 9) { /* neg */
7d1b0095 11322 tmp = tcg_temp_new_i32();
396e467c
FN
11323 tcg_gen_movi_i32(tmp, 0);
11324 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11325 tmp = load_reg(s, rd);
11326 } else {
39d5492a 11327 TCGV_UNUSED_I32(tmp);
396e467c 11328 }
99c475ab 11329
396e467c 11330 tmp2 = load_reg(s, rm);
5899f386 11331 switch (op) {
99c475ab 11332 case 0x0: /* and */
396e467c 11333 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11334 if (!s->condexec_mask)
396e467c 11335 gen_logic_CC(tmp);
99c475ab
FB
11336 break;
11337 case 0x1: /* eor */
396e467c 11338 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11339 if (!s->condexec_mask)
396e467c 11340 gen_logic_CC(tmp);
99c475ab
FB
11341 break;
11342 case 0x2: /* lsl */
9ee6e8bb 11343 if (s->condexec_mask) {
365af80e 11344 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11345 } else {
9ef39277 11346 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11347 gen_logic_CC(tmp2);
9ee6e8bb 11348 }
99c475ab
FB
11349 break;
11350 case 0x3: /* lsr */
9ee6e8bb 11351 if (s->condexec_mask) {
365af80e 11352 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11353 } else {
9ef39277 11354 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11355 gen_logic_CC(tmp2);
9ee6e8bb 11356 }
99c475ab
FB
11357 break;
11358 case 0x4: /* asr */
9ee6e8bb 11359 if (s->condexec_mask) {
365af80e 11360 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11361 } else {
9ef39277 11362 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11363 gen_logic_CC(tmp2);
9ee6e8bb 11364 }
99c475ab
FB
11365 break;
11366 case 0x5: /* adc */
49b4c31e 11367 if (s->condexec_mask) {
396e467c 11368 gen_adc(tmp, tmp2);
49b4c31e
RH
11369 } else {
11370 gen_adc_CC(tmp, tmp, tmp2);
11371 }
99c475ab
FB
11372 break;
11373 case 0x6: /* sbc */
2de68a49 11374 if (s->condexec_mask) {
396e467c 11375 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11376 } else {
11377 gen_sbc_CC(tmp, tmp, tmp2);
11378 }
99c475ab
FB
11379 break;
11380 case 0x7: /* ror */
9ee6e8bb 11381 if (s->condexec_mask) {
f669df27
AJ
11382 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11383 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11384 } else {
9ef39277 11385 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11386 gen_logic_CC(tmp2);
9ee6e8bb 11387 }
99c475ab
FB
11388 break;
11389 case 0x8: /* tst */
396e467c
FN
11390 tcg_gen_and_i32(tmp, tmp, tmp2);
11391 gen_logic_CC(tmp);
99c475ab 11392 rd = 16;
5899f386 11393 break;
99c475ab 11394 case 0x9: /* neg */
9ee6e8bb 11395 if (s->condexec_mask)
396e467c 11396 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11397 else
72485ec4 11398 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11399 break;
11400 case 0xa: /* cmp */
72485ec4 11401 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11402 rd = 16;
11403 break;
11404 case 0xb: /* cmn */
72485ec4 11405 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11406 rd = 16;
11407 break;
11408 case 0xc: /* orr */
396e467c 11409 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11410 if (!s->condexec_mask)
396e467c 11411 gen_logic_CC(tmp);
99c475ab
FB
11412 break;
11413 case 0xd: /* mul */
7b2919a0 11414 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11415 if (!s->condexec_mask)
396e467c 11416 gen_logic_CC(tmp);
99c475ab
FB
11417 break;
11418 case 0xe: /* bic */
f669df27 11419 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11420 if (!s->condexec_mask)
396e467c 11421 gen_logic_CC(tmp);
99c475ab
FB
11422 break;
11423 case 0xf: /* mvn */
396e467c 11424 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11425 if (!s->condexec_mask)
396e467c 11426 gen_logic_CC(tmp2);
99c475ab 11427 val = 1;
5899f386 11428 rm = rd;
99c475ab
FB
11429 break;
11430 }
11431 if (rd != 16) {
396e467c
FN
11432 if (val) {
11433 store_reg(s, rm, tmp2);
11434 if (op != 0xf)
7d1b0095 11435 tcg_temp_free_i32(tmp);
396e467c
FN
11436 } else {
11437 store_reg(s, rd, tmp);
7d1b0095 11438 tcg_temp_free_i32(tmp2);
396e467c
FN
11439 }
11440 } else {
7d1b0095
PM
11441 tcg_temp_free_i32(tmp);
11442 tcg_temp_free_i32(tmp2);
99c475ab
FB
11443 }
11444 break;
11445
11446 case 5:
11447 /* load/store register offset. */
11448 rd = insn & 7;
11449 rn = (insn >> 3) & 7;
11450 rm = (insn >> 6) & 7;
11451 op = (insn >> 9) & 7;
b0109805 11452 addr = load_reg(s, rn);
b26eefb6 11453 tmp = load_reg(s, rm);
b0109805 11454 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11455 tcg_temp_free_i32(tmp);
99c475ab 11456
c40c8556 11457 if (op < 3) { /* store */
b0109805 11458 tmp = load_reg(s, rd);
c40c8556
PM
11459 } else {
11460 tmp = tcg_temp_new_i32();
11461 }
99c475ab
FB
11462
11463 switch (op) {
11464 case 0: /* str */
9bb6558a 11465 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11466 break;
11467 case 1: /* strh */
9bb6558a 11468 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11469 break;
11470 case 2: /* strb */
9bb6558a 11471 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11472 break;
11473 case 3: /* ldrsb */
9bb6558a 11474 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11475 break;
11476 case 4: /* ldr */
9bb6558a 11477 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11478 break;
11479 case 5: /* ldrh */
9bb6558a 11480 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11481 break;
11482 case 6: /* ldrb */
9bb6558a 11483 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11484 break;
11485 case 7: /* ldrsh */
9bb6558a 11486 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11487 break;
11488 }
c40c8556 11489 if (op >= 3) { /* load */
b0109805 11490 store_reg(s, rd, tmp);
c40c8556
PM
11491 } else {
11492 tcg_temp_free_i32(tmp);
11493 }
7d1b0095 11494 tcg_temp_free_i32(addr);
99c475ab
FB
11495 break;
11496
11497 case 6:
11498 /* load/store word immediate offset */
11499 rd = insn & 7;
11500 rn = (insn >> 3) & 7;
b0109805 11501 addr = load_reg(s, rn);
99c475ab 11502 val = (insn >> 4) & 0x7c;
b0109805 11503 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11504
11505 if (insn & (1 << 11)) {
11506 /* load */
c40c8556 11507 tmp = tcg_temp_new_i32();
12dcc321 11508 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11509 store_reg(s, rd, tmp);
99c475ab
FB
11510 } else {
11511 /* store */
b0109805 11512 tmp = load_reg(s, rd);
12dcc321 11513 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11514 tcg_temp_free_i32(tmp);
99c475ab 11515 }
7d1b0095 11516 tcg_temp_free_i32(addr);
99c475ab
FB
11517 break;
11518
11519 case 7:
11520 /* load/store byte immediate offset */
11521 rd = insn & 7;
11522 rn = (insn >> 3) & 7;
b0109805 11523 addr = load_reg(s, rn);
99c475ab 11524 val = (insn >> 6) & 0x1f;
b0109805 11525 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11526
11527 if (insn & (1 << 11)) {
11528 /* load */
c40c8556 11529 tmp = tcg_temp_new_i32();
9bb6558a 11530 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11531 store_reg(s, rd, tmp);
99c475ab
FB
11532 } else {
11533 /* store */
b0109805 11534 tmp = load_reg(s, rd);
9bb6558a 11535 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11536 tcg_temp_free_i32(tmp);
99c475ab 11537 }
7d1b0095 11538 tcg_temp_free_i32(addr);
99c475ab
FB
11539 break;
11540
11541 case 8:
11542 /* load/store halfword immediate offset */
11543 rd = insn & 7;
11544 rn = (insn >> 3) & 7;
b0109805 11545 addr = load_reg(s, rn);
99c475ab 11546 val = (insn >> 5) & 0x3e;
b0109805 11547 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11548
11549 if (insn & (1 << 11)) {
11550 /* load */
c40c8556 11551 tmp = tcg_temp_new_i32();
9bb6558a 11552 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11553 store_reg(s, rd, tmp);
99c475ab
FB
11554 } else {
11555 /* store */
b0109805 11556 tmp = load_reg(s, rd);
9bb6558a 11557 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11558 tcg_temp_free_i32(tmp);
99c475ab 11559 }
7d1b0095 11560 tcg_temp_free_i32(addr);
99c475ab
FB
11561 break;
11562
11563 case 9:
11564 /* load/store from stack */
11565 rd = (insn >> 8) & 7;
b0109805 11566 addr = load_reg(s, 13);
99c475ab 11567 val = (insn & 0xff) * 4;
b0109805 11568 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11569
11570 if (insn & (1 << 11)) {
11571 /* load */
c40c8556 11572 tmp = tcg_temp_new_i32();
9bb6558a 11573 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11574 store_reg(s, rd, tmp);
99c475ab
FB
11575 } else {
11576 /* store */
b0109805 11577 tmp = load_reg(s, rd);
9bb6558a 11578 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11579 tcg_temp_free_i32(tmp);
99c475ab 11580 }
7d1b0095 11581 tcg_temp_free_i32(addr);
99c475ab
FB
11582 break;
11583
11584 case 10:
11585 /* add to high reg */
11586 rd = (insn >> 8) & 7;
5899f386
FB
11587 if (insn & (1 << 11)) {
11588 /* SP */
5e3f878a 11589 tmp = load_reg(s, 13);
5899f386
FB
11590 } else {
11591 /* PC. bit 1 is ignored. */
7d1b0095 11592 tmp = tcg_temp_new_i32();
5e3f878a 11593 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11594 }
99c475ab 11595 val = (insn & 0xff) * 4;
5e3f878a
PB
11596 tcg_gen_addi_i32(tmp, tmp, val);
11597 store_reg(s, rd, tmp);
99c475ab
FB
11598 break;
11599
11600 case 11:
11601 /* misc */
11602 op = (insn >> 8) & 0xf;
11603 switch (op) {
11604 case 0:
11605 /* adjust stack pointer */
b26eefb6 11606 tmp = load_reg(s, 13);
99c475ab
FB
11607 val = (insn & 0x7f) * 4;
11608 if (insn & (1 << 7))
6a0d8a1d 11609 val = -(int32_t)val;
b26eefb6
PB
11610 tcg_gen_addi_i32(tmp, tmp, val);
11611 store_reg(s, 13, tmp);
99c475ab
FB
11612 break;
11613
9ee6e8bb
PB
11614 case 2: /* sign/zero extend. */
11615 ARCH(6);
11616 rd = insn & 7;
11617 rm = (insn >> 3) & 7;
b0109805 11618 tmp = load_reg(s, rm);
9ee6e8bb 11619 switch ((insn >> 6) & 3) {
b0109805
PB
11620 case 0: gen_sxth(tmp); break;
11621 case 1: gen_sxtb(tmp); break;
11622 case 2: gen_uxth(tmp); break;
11623 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11624 }
b0109805 11625 store_reg(s, rd, tmp);
9ee6e8bb 11626 break;
99c475ab
FB
11627 case 4: case 5: case 0xc: case 0xd:
11628 /* push/pop */
b0109805 11629 addr = load_reg(s, 13);
5899f386
FB
11630 if (insn & (1 << 8))
11631 offset = 4;
99c475ab 11632 else
5899f386
FB
11633 offset = 0;
11634 for (i = 0; i < 8; i++) {
11635 if (insn & (1 << i))
11636 offset += 4;
11637 }
11638 if ((insn & (1 << 11)) == 0) {
b0109805 11639 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11640 }
99c475ab
FB
11641 for (i = 0; i < 8; i++) {
11642 if (insn & (1 << i)) {
11643 if (insn & (1 << 11)) {
11644 /* pop */
c40c8556 11645 tmp = tcg_temp_new_i32();
12dcc321 11646 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11647 store_reg(s, i, tmp);
99c475ab
FB
11648 } else {
11649 /* push */
b0109805 11650 tmp = load_reg(s, i);
12dcc321 11651 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11652 tcg_temp_free_i32(tmp);
99c475ab 11653 }
5899f386 11654 /* advance to the next address. */
b0109805 11655 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11656 }
11657 }
39d5492a 11658 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11659 if (insn & (1 << 8)) {
11660 if (insn & (1 << 11)) {
11661 /* pop pc */
c40c8556 11662 tmp = tcg_temp_new_i32();
12dcc321 11663 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11664 /* don't set the pc until the rest of the instruction
11665 has completed */
11666 } else {
11667 /* push lr */
b0109805 11668 tmp = load_reg(s, 14);
12dcc321 11669 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11670 tcg_temp_free_i32(tmp);
99c475ab 11671 }
b0109805 11672 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11673 }
5899f386 11674 if ((insn & (1 << 11)) == 0) {
b0109805 11675 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11676 }
99c475ab 11677 /* write back the new stack pointer */
b0109805 11678 store_reg(s, 13, addr);
99c475ab 11679 /* set the new PC value */
be5e7a76 11680 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11681 store_reg_from_load(s, 15, tmp);
be5e7a76 11682 }
99c475ab
FB
11683 break;
11684
9ee6e8bb
PB
11685 case 1: case 3: case 9: case 11: /* czb */
11686 rm = insn & 7;
d9ba4830 11687 tmp = load_reg(s, rm);
9ee6e8bb
PB
11688 s->condlabel = gen_new_label();
11689 s->condjmp = 1;
11690 if (insn & (1 << 11))
cb63669a 11691 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11692 else
cb63669a 11693 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11694 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11695 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11696 val = (uint32_t)s->pc + 2;
11697 val += offset;
11698 gen_jmp(s, val);
11699 break;
11700
11701 case 15: /* IT, nop-hint. */
11702 if ((insn & 0xf) == 0) {
11703 gen_nop_hint(s, (insn >> 4) & 0xf);
11704 break;
11705 }
11706 /* If Then. */
11707 s->condexec_cond = (insn >> 4) & 0xe;
11708 s->condexec_mask = insn & 0x1f;
11709 /* No actual code generated for this insn, just setup state. */
11710 break;
11711
06c949e6 11712 case 0xe: /* bkpt */
d4a2dc67
PM
11713 {
11714 int imm8 = extract32(insn, 0, 8);
be5e7a76 11715 ARCH(5);
73710361
GB
11716 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11717 default_exception_el(s));
06c949e6 11718 break;
d4a2dc67 11719 }
06c949e6 11720
19a6e31c
PM
11721 case 0xa: /* rev, and hlt */
11722 {
11723 int op1 = extract32(insn, 6, 2);
11724
11725 if (op1 == 2) {
11726 /* HLT */
11727 int imm6 = extract32(insn, 0, 6);
11728
11729 gen_hlt(s, imm6);
11730 break;
11731 }
11732
11733 /* Otherwise this is rev */
9ee6e8bb
PB
11734 ARCH(6);
11735 rn = (insn >> 3) & 0x7;
11736 rd = insn & 0x7;
b0109805 11737 tmp = load_reg(s, rn);
19a6e31c 11738 switch (op1) {
66896cb8 11739 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11740 case 1: gen_rev16(tmp); break;
11741 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11742 default:
11743 g_assert_not_reached();
9ee6e8bb 11744 }
b0109805 11745 store_reg(s, rd, tmp);
9ee6e8bb 11746 break;
19a6e31c 11747 }
9ee6e8bb 11748
d9e028c1
PM
11749 case 6:
11750 switch ((insn >> 5) & 7) {
11751 case 2:
11752 /* setend */
11753 ARCH(6);
9886ecdf
PB
11754 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11755 gen_helper_setend(cpu_env);
dcba3a8d 11756 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11757 }
9ee6e8bb 11758 break;
d9e028c1
PM
11759 case 3:
11760 /* cps */
11761 ARCH(6);
11762 if (IS_USER(s)) {
11763 break;
8984bd2e 11764 }
b53d8923 11765 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11766 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11767 /* FAULTMASK */
11768 if (insn & 1) {
11769 addr = tcg_const_i32(19);
11770 gen_helper_v7m_msr(cpu_env, addr, tmp);
11771 tcg_temp_free_i32(addr);
11772 }
11773 /* PRIMASK */
11774 if (insn & 2) {
11775 addr = tcg_const_i32(16);
11776 gen_helper_v7m_msr(cpu_env, addr, tmp);
11777 tcg_temp_free_i32(addr);
11778 }
11779 tcg_temp_free_i32(tmp);
11780 gen_lookup_tb(s);
11781 } else {
11782 if (insn & (1 << 4)) {
11783 shift = CPSR_A | CPSR_I | CPSR_F;
11784 } else {
11785 shift = 0;
11786 }
11787 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11788 }
d9e028c1
PM
11789 break;
11790 default:
11791 goto undef;
9ee6e8bb
PB
11792 }
11793 break;
11794
99c475ab
FB
11795 default:
11796 goto undef;
11797 }
11798 break;
11799
11800 case 12:
a7d3970d 11801 {
99c475ab 11802 /* load/store multiple */
39d5492a
PM
11803 TCGv_i32 loaded_var;
11804 TCGV_UNUSED_I32(loaded_var);
99c475ab 11805 rn = (insn >> 8) & 0x7;
b0109805 11806 addr = load_reg(s, rn);
99c475ab
FB
11807 for (i = 0; i < 8; i++) {
11808 if (insn & (1 << i)) {
99c475ab
FB
11809 if (insn & (1 << 11)) {
11810 /* load */
c40c8556 11811 tmp = tcg_temp_new_i32();
12dcc321 11812 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11813 if (i == rn) {
11814 loaded_var = tmp;
11815 } else {
11816 store_reg(s, i, tmp);
11817 }
99c475ab
FB
11818 } else {
11819 /* store */
b0109805 11820 tmp = load_reg(s, i);
12dcc321 11821 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11822 tcg_temp_free_i32(tmp);
99c475ab 11823 }
5899f386 11824 /* advance to the next address */
b0109805 11825 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11826 }
11827 }
b0109805 11828 if ((insn & (1 << rn)) == 0) {
a7d3970d 11829 /* base reg not in list: base register writeback */
b0109805
PB
11830 store_reg(s, rn, addr);
11831 } else {
a7d3970d
PM
11832 /* base reg in list: if load, complete it now */
11833 if (insn & (1 << 11)) {
11834 store_reg(s, rn, loaded_var);
11835 }
7d1b0095 11836 tcg_temp_free_i32(addr);
b0109805 11837 }
99c475ab 11838 break;
a7d3970d 11839 }
99c475ab
FB
11840 case 13:
11841 /* conditional branch or swi */
11842 cond = (insn >> 8) & 0xf;
11843 if (cond == 0xe)
11844 goto undef;
11845
11846 if (cond == 0xf) {
11847 /* swi */
eaed129d 11848 gen_set_pc_im(s, s->pc);
d4a2dc67 11849 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11850 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11851 break;
11852 }
11853 /* generate a conditional jump to next instruction */
e50e6a20 11854 s->condlabel = gen_new_label();
39fb730a 11855 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11856 s->condjmp = 1;
99c475ab
FB
11857
11858 /* jump to the offset */
5899f386 11859 val = (uint32_t)s->pc + 2;
99c475ab 11860 offset = ((int32_t)insn << 24) >> 24;
5899f386 11861 val += offset << 1;
8aaca4c0 11862 gen_jmp(s, val);
99c475ab
FB
11863 break;
11864
11865 case 14:
358bf29e 11866 if (insn & (1 << 11)) {
296e5a0a
PM
11867 /* thumb_insn_is_16bit() ensures we can't get here for
11868 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11869 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11870 */
11871 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11872 ARCH(5);
11873 offset = ((insn & 0x7ff) << 1);
11874 tmp = load_reg(s, 14);
11875 tcg_gen_addi_i32(tmp, tmp, offset);
11876 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11877
11878 tmp2 = tcg_temp_new_i32();
11879 tcg_gen_movi_i32(tmp2, s->pc | 1);
11880 store_reg(s, 14, tmp2);
11881 gen_bx(s, tmp);
358bf29e
PB
11882 break;
11883 }
9ee6e8bb 11884 /* unconditional branch */
99c475ab
FB
11885 val = (uint32_t)s->pc;
11886 offset = ((int32_t)insn << 21) >> 21;
11887 val += (offset << 1) + 2;
8aaca4c0 11888 gen_jmp(s, val);
99c475ab
FB
11889 break;
11890
11891 case 15:
296e5a0a
PM
11892 /* thumb_insn_is_16bit() ensures we can't get here for
11893 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11894 */
11895 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11896
11897 if (insn & (1 << 11)) {
11898 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11899 offset = ((insn & 0x7ff) << 1) | 1;
11900 tmp = load_reg(s, 14);
11901 tcg_gen_addi_i32(tmp, tmp, offset);
11902
11903 tmp2 = tcg_temp_new_i32();
11904 tcg_gen_movi_i32(tmp2, s->pc | 1);
11905 store_reg(s, 14, tmp2);
11906 gen_bx(s, tmp);
11907 } else {
11908 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11909 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11910
11911 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
11912 }
9ee6e8bb 11913 break;
99c475ab
FB
11914 }
11915 return;
9ee6e8bb 11916illegal_op:
99c475ab 11917undef:
73710361
GB
11918 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11919 default_exception_el(s));
99c475ab
FB
11920}
11921
541ebcd4
PM
11922static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11923{
11924 /* Return true if the insn at dc->pc might cross a page boundary.
11925 * (False positives are OK, false negatives are not.)
5b8d7289
PM
11926 * We know this is a Thumb insn, and our caller ensures we are
11927 * only called if dc->pc is less than 4 bytes from the page
11928 * boundary, so we cross the page if the first 16 bits indicate
11929 * that this is a 32 bit insn.
541ebcd4 11930 */
5b8d7289 11931 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 11932
5b8d7289 11933 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
11934}
11935
/* Translator hook: initialise the DisasContext for a new TB.
 * Decodes the TB flags into per-TB translation state, clamps max_insns
 * where required, and allocates the global TCG temporaries used by the
 * VFP/Neon/iwMMXt code.  Returns the (possibly reduced) max_insns.
 */
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the translation-relevant state from the TB flags */
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
        regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->next_page_start =
        (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = (dc->next_page_start - dc->base.pc_first) / 4;
        max_insns = MIN(max_insns, bound);
    }

    /* Allocate the per-TB temporaries used by the FP/vector translation */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
12020
b1476854
LV
/* Per-TB start hook for the generic translator loop: emit any code that
 * must run before the first instruction of the TB is translated.
 */
static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    /* Catch any TCG temporaries leaked by preceding codegen (debug aid). */
    tcg_clear_temp_count();
}
12064
f62bd897
LV
/* Per-instruction hook: record the unwind state that
 * restore_state_to_opc() will need if execution stops mid-TB
 * (PC, packed IT/condexec bits, and a third word).
 */
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* Remember the op-buffer index of this insn_start op so it can be
     * located again later (presumably so the third, zero, argument can
     * be patched with syndrome data -- see insn_start_idx users; TODO
     * confirm against the rest of the file).
     */
    dc->insn_start_idx = tcg_op_buf_count();
    tcg_gen_insn_start(dc->pc,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
}
12074
a68956ad
LV
/* Breakpoint hook for the generic translator loop: emit the code needed
 * when translation reaches an address that has a breakpoint set.
 * Returns true to tell the loop the breakpoint has been handled.
 */
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        /* CPU-internal breakpoint: whether it actually fires is decided
         * at run time by the helper, so the IT state and PC must be made
         * current before calling it.
         */
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        /* Debugger (e.g. gdbstub) breakpoint: always raise EXCP_DEBUG. */
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           to for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->pc += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
12101
/* Checks shared by the ARM and Thumb translate hooks, run before an
 * instruction is decoded.  Returns true if the caller must skip decoding
 * because code ending the TB has already been emitted.
 */
static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->pc >= 0xffff0000) {
        /* We always get here via a jump, so know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         * a) we just took an exception to an EL which is being debugged
         * and this is the first insn in the exception handler
         * b) debug exceptions were masked and we just unmasked them
         * without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}
13189a90 12135
/* Cleanup shared by the ARM and Thumb translate hooks, run after one
 * instruction has been decoded and translated.
 */
static void arm_post_translate_insn(DisasContext *dc)
{
    /* If the insn was conditional and did not itself end the TB, emit
     * the label where the "condition failed" path rejoins the flow.
     */
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    /* Publish the PC we advanced to back to the generic loop state. */
    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
12145
722ef0a5
RH
12146static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12147{
12148 DisasContext *dc = container_of(dcbase, DisasContext, base);
12149 CPUARMState *env = cpu->env_ptr;
12150 unsigned int insn;
12151
12152 if (arm_pre_translate_insn(dc)) {
12153 return;
12154 }
12155
12156 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12157 dc->insn = insn;
722ef0a5
RH
12158 dc->pc += 4;
12159 disas_arm_insn(dc, insn);
12160
d0264d86
RH
12161 arm_post_translate_insn(dc);
12162
12163 /* ARM is a fixed-length ISA. We performed the cross-page check
12164 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12165}
12166
dcf14dfb
PM
12167static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12168{
12169 /* Return true if this Thumb insn is always unconditional,
12170 * even inside an IT block. This is true of only a very few
12171 * instructions: BKPT, HLT, and SG.
12172 *
12173 * A larger class of instructions are UNPREDICTABLE if used
12174 * inside an IT block; we do not need to detect those here, because
12175 * what we do by default (perform the cc check and update the IT
12176 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12177 * choice for those situations.
12178 *
12179 * insn is either a 16-bit or a 32-bit instruction; the two are
12180 * distinguishable because for the 16-bit case the top 16 bits
12181 * are zeroes, and that isn't a valid 32-bit encoding.
12182 */
12183 if ((insn & 0xffffff00) == 0xbe00) {
12184 /* BKPT */
12185 return true;
12186 }
12187
12188 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12189 !arm_dc_feature(s, ARM_FEATURE_M)) {
12190 /* HLT: v8A only. This is unconditional even when it is going to
12191 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12192 * For v7 cores this was a plain old undefined encoding and so
12193 * honours its cc check. (We might be using the encoding as
12194 * a semihosting trap, but we don't change the cc check behaviour
12195 * on that account, because a debugger connected to a real v7A
12196 * core and emulating semihosting traps by catching the UNDEF
12197 * exception would also only see cases where the cc check passed.
12198 * No guest code should be trying to do a HLT semihosting trap
12199 * in an IT block anyway.
12200 */
12201 return true;
12202 }
12203
12204 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12205 arm_dc_feature(s, ARM_FEATURE_M)) {
12206 /* SG: v8M only */
12207 return true;
12208 }
12209
12210 return false;
12211}
12212
722ef0a5
RH
/* Translate-one-insn hook for Thumb (T32) code: fetch one 16- or 32-bit
 * instruction at dc->pc, handle its IT-block conditionality, decode it,
 * and advance the IT state machine.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword and, if it is the start of a 32-bit
     * encoding, the second halfword too (combined with the first in the
     * high half of insn).
     */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit the runtime condition check (branch over
     * the insn if the condition fails), unless the insn is one of the few
     * that execute unconditionally even in an IT block.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12283
/* TB-end hook for the generic translator loop: emit the code that
 * finishes the translation block, according to why translation stopped
 * (dc->base.is_jmp) and whether we are single-stepping.  Also emits the
 * "condition failed" epilogue for a trailing conditional branch/trap.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            /* Advance the single-step state machine before raising the
             * exception, so the step is accounted for.
             */
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            /* Exception-generating code was already emitted; nothing more. */
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* Pass the insn length (2 or 4 bytes) to the WFI helper;
             * a 32-bit Thumb encoding has bit 31 of dc->insn set.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12406
4013f7fc
LV
/* Debug-logging hook: print the symbol for the TB's start address and
 * disassemble the guest code it covers.
 */
static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
12414
23169224
LV
/* Hooks wiring the A32 (32-bit ARM) decoder into the generic
 * translator loop; differs from thumb_translator_ops only in
 * .translate_insn.
 */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12424
722ef0a5
RH
/* Hooks wiring the T32 (Thumb) decoder into the generic translator
 * loop; identical to arm_translator_ops except for .translate_insn.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12434
70d3c035 12435/* generate intermediate code for basic block 'tb'. */
23169224 12436void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12437{
23169224
LV
12438 DisasContext dc;
12439 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12440
722ef0a5
RH
12441 if (ARM_TBFLAG_THUMB(tb->flags)) {
12442 ops = &thumb_translator_ops;
12443 }
23169224 12444#ifdef TARGET_AARCH64
70d3c035 12445 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12446 ops = &aarch64_translator_ops;
2c0262af
FB
12447 }
12448#endif
23169224
LV
12449
12450 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12451}
12452
/* Printable names for the AArch32 CPSR.M mode values, indexed by the low
 * four bits of the mode number; "???" marks reserved encodings.
 * Fix: the array itself was modifiable (const char *[]); make both the
 * pointers and the array const so it lands in read-only data.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12457
878096ee
AF
12458void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12459 int flags)
2c0262af 12460{
878096ee
AF
12461 ARMCPU *cpu = ARM_CPU(cs);
12462 CPUARMState *env = &cpu->env;
2c0262af
FB
12463 int i;
12464
17731115
PM
12465 if (is_a64(env)) {
12466 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12467 return;
12468 }
12469
2c0262af 12470 for(i=0;i<16;i++) {
7fe48483 12471 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12472 if ((i % 4) == 3)
7fe48483 12473 cpu_fprintf(f, "\n");
2c0262af 12474 else
7fe48483 12475 cpu_fprintf(f, " ");
2c0262af 12476 }
06e5cf7a 12477
5b906f35
PM
12478 if (arm_feature(env, ARM_FEATURE_M)) {
12479 uint32_t xpsr = xpsr_read(env);
12480 const char *mode;
1e577cc7
PM
12481 const char *ns_status = "";
12482
12483 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
12484 ns_status = env->v7m.secure ? "S " : "NS ";
12485 }
5b906f35
PM
12486
12487 if (xpsr & XPSR_EXCP) {
12488 mode = "handler";
12489 } else {
8bfc26ea 12490 if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
5b906f35
PM
12491 mode = "unpriv-thread";
12492 } else {
12493 mode = "priv-thread";
12494 }
12495 }
12496
1e577cc7 12497 cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
5b906f35
PM
12498 xpsr,
12499 xpsr & XPSR_N ? 'N' : '-',
12500 xpsr & XPSR_Z ? 'Z' : '-',
12501 xpsr & XPSR_C ? 'C' : '-',
12502 xpsr & XPSR_V ? 'V' : '-',
12503 xpsr & XPSR_T ? 'T' : 'A',
1e577cc7 12504 ns_status,
5b906f35 12505 mode);
06e5cf7a 12506 } else {
5b906f35
PM
12507 uint32_t psr = cpsr_read(env);
12508 const char *ns_status = "";
12509
12510 if (arm_feature(env, ARM_FEATURE_EL3) &&
12511 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12512 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12513 }
12514
12515 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
12516 psr,
12517 psr & CPSR_N ? 'N' : '-',
12518 psr & CPSR_Z ? 'Z' : '-',
12519 psr & CPSR_C ? 'C' : '-',
12520 psr & CPSR_V ? 'V' : '-',
12521 psr & CPSR_T ? 'T' : 'A',
12522 ns_status,
12523 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
12524 }
b7bcbe95 12525
f2617cfc
PM
12526 if (flags & CPU_DUMP_FPU) {
12527 int numvfpregs = 0;
12528 if (arm_feature(env, ARM_FEATURE_VFP)) {
12529 numvfpregs += 16;
12530 }
12531 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12532 numvfpregs += 16;
12533 }
12534 for (i = 0; i < numvfpregs; i++) {
12535 uint64_t v = float64_val(env->vfp.regs[i]);
12536 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12537 i * 2, (uint32_t)v,
12538 i * 2 + 1, (uint32_t)(v >> 32),
12539 i, v);
12540 }
12541 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12542 }
2c0262af 12543}
a6b025d3 12544
bad729e2
RH
12545void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12546 target_ulong *data)
d2856f1a 12547{
3926cc84 12548 if (is_a64(env)) {
bad729e2 12549 env->pc = data[0];
40f860cd 12550 env->condexec_bits = 0;
aaa1f954 12551 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12552 } else {
bad729e2
RH
12553 env->regs[15] = data[0];
12554 env->condexec_bits = data[1];
aaa1f954 12555 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12556 }
d2856f1a 12557}
This page took 3.620108 seconds and 4 git commands to generate.