]> Git Repo - qemu.git/blame - target-arm/translate.c
trace: add "-trace enable=..."
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
57fec1fe 26#include "tcg-op.h"
1de7afc9 27#include "qemu/log.h"
534df156 28#include "qemu/bitops.h"
1d854765 29#include "arm_ldst.h"
1497c961 30
2ef6175a
RH
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
2c0262af 33
a7e30d84
LV
34#include "trace-tcg.h"
35
36
2b51668f
PM
37#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 39/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 40#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 41#define ENABLE_ARCH_5J 0
2b51668f
PM
42#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
46#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 47
86753403 48#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 49
f570c61e 50#include "translate.h"
e12ce78d 51
b5ff1b31
FB
52#if defined(CONFIG_USER_ONLY)
53#define IS_USER(s) 1
54#else
55#define IS_USER(s) (s->user)
56#endif
57
3407ad0e 58TCGv_ptr cpu_env;
ad69471c 59/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 60static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 61static TCGv_i32 cpu_R[16];
78bcaa3e
RH
62TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
63TCGv_i64 cpu_exclusive_addr;
64TCGv_i64 cpu_exclusive_val;
426f5abc 65#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
66TCGv_i64 cpu_exclusive_test;
67TCGv_i32 cpu_exclusive_info;
426f5abc 68#endif
ad69471c 69
b26eefb6 70/* FIXME: These should be removed. */
39d5492a 71static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 72static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 73
022c62cb 74#include "exec/gen-icount.h"
2e70f6ef 75
155c3eac
FN
76static const char *regnames[] =
77 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
78 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
79
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* The CPU environment pointer lives in the fixed register TCG_AREG0. */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* Map the 16 AArch32 core registers (r0..r14, pc) onto CPUARMState. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Condition flags are kept as separate 32-bit globals. */
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    /* State for exclusive (LDREX/STREX) monitors. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    /* User-mode emulation tracks extra exclusive-access bookkeeping. */
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
110
579d21cc
PM
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:   /* stage-2 index is never valid for an insn's own mmu_idx */
    default:
        g_assert_not_reached();
    }
}
132
39d5492a 133static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 134{
39d5492a 135 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
136 tcg_gen_ld_i32(tmp, cpu_env, offset);
137 return tmp;
138}
139
0ecb72a5 140#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 141
/* Store 'var' to the CPU state field at byte offset 'offset'.
 * 'var' must be a temporary and is freed (marked dead) here.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
147
148#define store_cpu_field(var, name) \
0ecb72a5 149 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 150
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        /* Reads of r15 yield the address of the current insn plus 4
         * (Thumb: plus 2 past the updated PC), per the architecture.
         */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
166
167/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 168static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 169{
39d5492a 170 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
171 load_reg_var(s, tmp, reg);
172 return tmp;
173}
174
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* Writes to r15 clear bit 0 (interworking bit is handled by
         * gen_bx variants, not here) and end the translation block.
         */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
186
b26eefb6 187/* Value extensions. */
86831435
PB
188#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
189#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
190#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
191#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
192
1497c961
PB
193#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
194#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 195
b26eefb6 196
/* Write 'var' into CPSR under control of 'mask' via the cpsr_write helper.
 * Only bits set in 'mask' are updated.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
d9ba4830
PB
203/* Set NZCV flags from the high 4 bits of var. */
204#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
205
/* Raise a QEMU-internal exception (one with no architectural syndrome);
 * asserts that 'excp' really is an internal exception number.
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
214
/* Raise an architectural exception 'excp' with the given syndrome value,
 * targeting exception level 'target_el'.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
228
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
239
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
256
/* Dual 16x16->32 signed multiply: on return, a = lo(a)*lo(b) and
 * b = hi(a)*hi(b), each as signed 32-bit products.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low-half product first, kept in tmp1 until 'a' is no longer needed. */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halves via arithmetic shift (keeps the sign). */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
271
/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* tmp = even bytes moved down, var = odd bytes moved up; OR merges. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
283
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
291
/* Unsigned bitfield extract.  'mask' is the already-shifted field mask,
 * i.e. (1 << width) - 1.
 */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
299
/* Signed bitfield extract. */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend the width-bit field using the xor/sub trick:
         * flipping the sign bit then subtracting it propagates it upward.
         */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
314
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;    /* result reuses (and returns) the 'a' temporary */
}
328
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;    /* result reuses (and returns) the 'a' temporary */
}
342
/* 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply into separate lo/hi halves. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
361
/* Signed 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Signed widening multiply into separate lo/hi halves. */
    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
379
8f01245e 380/* Swap low and high halfwords. */
39d5492a 381static void gen_swap_half(TCGv_i32 var)
8f01245e 382{
39d5492a 383 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
384 tcg_gen_shri_i32(tmp, var, 16);
385 tcg_gen_shli_i32(var, var, 16);
386 tcg_gen_or_i32(var, var, tmp);
7d1b0095 387 tcg_temp_free_i32(tmp);
8f01245e
PB
388}
389
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Mask out bit 15 of each half so the halfword adds cannot carry
     * into each other; restore the correct bit-15 result via the xor.
     */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
409
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
415
/* Set N and Z flags from var.  (NF holds the value whose sign bit is N;
 * ZF is zero iff Z is set.)
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
422
/* T0 += T1 + CF.  Flags are not updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
429
/* dest = T0 + T1 + CF.  Flags are not updated. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
436
/* dest = T0 - T1 + CF - 1.  Flags are not updated. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
444
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the carry-out directly in cpu_CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
458
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops accumulate the carry from t0+CF and +t1. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the addition at 64 bits and split result/carry. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Same signed-overflow derivation as gen_add_CC. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
486
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction sets C when there is NO borrow, i.e. t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff operands differed in
     * sign and the result's sign differs from t0.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
501
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-carry is add-with-carry of the complement. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
510
/* Emit a variable-amount shift (shl/shr) with the ARM semantics that a
 * shift amount >= 32 (taken from the low byte of t1) yields zero.
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    /* Source is forced to 0 when the shift amount exceeds 31. */     \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
529
/* Arithmetic shift right by a variable amount; amounts >= 32 are
 * clamped to 31 so the sign bit fills the result (ARM semantics).
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
541
/* dest = |src| via movcond: pick src when src > 0, else its negation. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 551
/* Set CF to bit 'shift' of var (the shifter carry-out for an
 * immediate shift).  shift == 0 means bit 0.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shift already isolated the bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 563
9a119ff6 564/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
565static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
566 int shift, int flags)
9a119ff6
PB
567{
568 switch (shiftop) {
569 case 0: /* LSL */
570 if (shift != 0) {
571 if (flags)
572 shifter_out_im(var, 32 - shift);
573 tcg_gen_shli_i32(var, var, shift);
574 }
575 break;
576 case 1: /* LSR */
577 if (shift == 0) {
578 if (flags) {
66c374de 579 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
580 }
581 tcg_gen_movi_i32(var, 0);
582 } else {
583 if (flags)
584 shifter_out_im(var, shift - 1);
585 tcg_gen_shri_i32(var, var, shift);
586 }
587 break;
588 case 2: /* ASR */
589 if (shift == 0)
590 shift = 32;
591 if (flags)
592 shifter_out_im(var, shift - 1);
593 if (shift == 32)
594 shift = 31;
595 tcg_gen_sari_i32(var, var, shift);
596 break;
597 case 3: /* ROR/RRX */
598 if (shift != 0) {
599 if (flags)
600 shifter_out_im(var, shift - 1);
f669df27 601 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 602 } else {
39d5492a 603 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 604 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
605 if (flags)
606 shifter_out_im(var, 0);
607 tcg_gen_shri_i32(var, var, 1);
b26eefb6 608 tcg_gen_or_i32(var, var, tmp);
7d1b0095 609 tcg_temp_free_i32(tmp);
b26eefb6
PB
610 }
611 }
612};
613
39d5492a
PM
/* Shift var by a register amount.  If 'flags' is set the flag-updating
 * helpers are used (which also set CF); 'shift' is freed here.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
641
6ddbc6e4
PB
/* Dispatch the ARM-encoding parallel add/subtract helpers: 'op2'
 * selects the operation, the prefix selects the saturation/flavour.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* The signed/unsigned (GE-flag setting) forms take a pointer to GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
    /* Saturating/halving forms do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
687
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
/* Same dispatch as gen_arm_parallel_addsub but with the Thumb-2 mapping:
 * here 'op1' selects the operation and 'op2' the flavour.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
    /* The signed/unsigned (GE-flag setting) forms take a pointer to GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
    /* Saturating/halving forms do not touch GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
734
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;   /* true when 'value' is a global flag temp, not ours */

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the inverse of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
824
/* Release the temporary held by a DisasCompare, unless it aliases a
 * global flag variable (which must not be freed).
 */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}
831
/* Branch to 'label' when the prepared comparison holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
836
837void arm_gen_test_cc(int cc, TCGLabel *label)
838{
839 DisasCompare cmp;
840 arm_test_cc(&cmp, cc);
841 arm_jump_cc(&cmp, label);
842 arm_free_cc(&cmp);
d9ba4830 843}
2c0262af 844
/* Per data-processing opcode: 1 if the op sets flags via gen_logic_CC
 * (logical ops), 0 if it uses the arithmetic flag-setting path.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 863
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    /* Only touch env->thumb if the interworking bit actually changes it. */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
878
/* Set PC and Thumb state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    /* Bit 0 of the target selects the Thumb state; the PC gets it cleared. */
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
887
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
899
be5e7a76
DES
/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above.  This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
912
08307563
PM
913/* Abstractions of "generate code to do a guest load/store for
914 * AArch32", where a vaddr is always 32 bits (and is zero
915 * extended if we're a 64 bit core) and data is also
916 * 32 bits unless specifically doing a 64 bit access.
917 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 918 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
919 */
#if TARGET_LONG_BITS == 32

/* 32-bit TCG target_ulong: the TCGv_i32 guest address is used directly. */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_ld_i32(val, addr, index, (OPC)); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_st_i32(val, addr, index, (OPC)); \
}

/* 64-bit (target-endian) load from a 32-bit guest address. */
static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

/* 64-bit (target-endian) store to a 32-bit guest address. */
static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

/* 64-bit TCG target_ulong: zero-extend the 32-bit address into a
 * 64-bit temporary before issuing the memory access.
 */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif
981
09f78135
RH
/* Sign/zero-extending 8/16-bit and 32-bit loads (target-endian). */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_TEUW | MO_ALIGN)
DO_GEN_LD(32ua, MO_TEUL | MO_ALIGN)
/* 8/16/32-bit stores (target-endian). */
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
08307563 993
/* Set the translated R15 (PC) to a known immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
998
37e6456e
PM
/* Generate code for an HVC (hypervisor call) insn with 16-bit immediate. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
1016
/* Generate code for an SMC (secure monitor call) insn. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1031
d4a2dc67
PM
/* Sync the IT (if-then) execution state from the DisasContext back
 * into the CPU state field, if we are inside an IT block.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Pack condition and remaining mask into the condexec_bits encoding. */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
1042
/* Raise a QEMU-internal exception for the insn at s->pc - offset,
 * syncing the condexec state and PC first.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP; /* end translation: exception was raised */
}
1050
73710361
GB
/* Raise an architectural exception 'excp' with syndrome 'syn', taken to
 * exception level 'target_el', for the insn at s->pc - offset.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP; /* end translation: exception was raised */
}
1059
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 (the Thumb bit) is masked out of the stored PC. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1066
/* Apply the addressing-mode offset of an ARM data-transfer insn to 'var':
 * either a 12-bit immediate or a shifted register, added or subtracted
 * according to the U bit (bit 23).
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1094
/* Apply the addressing-mode offset of an ARM halfword/doubleword
 * transfer insn to 'var': a split 8-bit immediate or a plain register,
 * added or subtracted per the U bit (bit 23). 'extra' is an additional
 * constant adjustment folded into the immediate form.
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1122
5aaebd13
PM
1123static TCGv_ptr get_fpstatus_ptr(int neon)
1124{
1125 TCGv_ptr statusptr = tcg_temp_new_ptr();
1126 int offset;
1127 if (neon) {
0ecb72a5 1128 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1129 } else {
0ecb72a5 1130 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1131 }
1132 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1133 return statusptr;
1134}
1135
4373f3ce
PB
/* Two-operand VFP arithmetic: F0 = F0 <op> F1, using the VFP
 * float_status; dp selects double vs single precision.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1154
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1166
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1176
4373f3ce
PB
1177static inline void gen_vfp_abs(int dp)
1178{
1179 if (dp)
1180 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1181 else
1182 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1183}
1184
1185static inline void gen_vfp_neg(int dp)
1186{
1187 if (dp)
1188 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1189 else
1190 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1191}
1192
1193static inline void gen_vfp_sqrt(int dp)
1194{
1195 if (dp)
1196 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1197 else
1198 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1199}
1200
1201static inline void gen_vfp_cmp(int dp)
1202{
1203 if (dp)
1204 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1205 else
1206 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1207}
1208
1209static inline void gen_vfp_cmpe(int dp)
1210{
1211 if (dp)
1212 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1213 else
1214 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1215}
1216
1217static inline void gen_vfp_F1_ld0(int dp)
1218{
1219 if (dp)
5b340b51 1220 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1221 else
5b340b51 1222 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1223}
1224
5500b06c
PM
/* Integer-to-float conversions on F0; 'neon' selects which
 * float_status to use (see get_fpstatus_ptr).
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1240
5500b06c
PM
/* Float-to-integer conversions; result is always a 32-bit value in F0s.
 * The 'z' variants use round-to-zero.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1258
/* Fixed-point <-> float conversions with a shift (number of fraction
 * bits); 'round' selects the helper's rounding-mode suffix.
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
/* to-fixed conversions truncate; from-fixed use the default rounding. */
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1283
/* Load F0 (64-bit if dp, else 32-bit) from guest memory at 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}
1292
/* Store F0 (64-bit if dp, else 32-bit) to guest memory at 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}
1301
8e96005d
FB
1302static inline long
1303vfp_reg_offset (int dp, int reg)
1304{
1305 if (dp)
1306 return offsetof(CPUARMState, vfp.regs[reg]);
1307 else if (reg & 1) {
1308 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1309 + offsetof(CPU_DoubleU, l.upper);
1310 } else {
1311 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1312 + offsetof(CPU_DoubleU, l.lower);
1313 }
1314}
9ee6e8bb
PB
1315
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Piece n of Dreg is S register reg*2 + n. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1325
/* Load one 32-bit piece of a NEON register into a fresh temporary.
 * Caller owns (and must free) the returned TCGv_i32.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1332
/* Store 'var' into one 32-bit piece of a NEON register.
 * Consumes (frees) 'var'.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1338
/* Load a whole 64-bit NEON/VFP D register into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1343
/* Store 'var' into a whole 64-bit NEON/VFP D register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1348
4373f3ce
PB
/* VFP single/double values are kept as plain i32/i64 in CPUARMState,
 * so the "float" load/store ops are just aliases.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1353
b7bcbe95
FB
1354static inline void gen_mov_F0_vreg(int dp, int reg)
1355{
1356 if (dp)
4373f3ce 1357 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1358 else
4373f3ce 1359 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1360}
1361
1362static inline void gen_mov_F1_vreg(int dp, int reg)
1363{
1364 if (dp)
4373f3ce 1365 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1366 else
4373f3ce 1367 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1368}
1369
1370static inline void gen_mov_vreg_F0(int dp, int reg)
1371{
1372 if (dp)
4373f3ce 1373 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1374 else
4373f3ce 1375 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1376}
1377
18c9b560
AZ
/* Coprocessor insn bit 20: set when the transfer is a read (e.g. TMRRC). */
#define ARM_CP_RW_BIT (1 << 20)
1379
/* Load 64-bit iwMMXt data register wRn into 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1384
/* Store 'var' into 64-bit iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1389
/* Load 32-bit iwMMXt control register wCx into a fresh temporary.
 * Caller owns (and must free) the returned TCGv_i32.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1396
/* Store 'var' into iwMMXt control register wCx. Consumes (frees) 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1402
/* Copy the working value M0 out to wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

/* Copy wRn into the working value M0. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1412
/* M0 |= wRn */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 &= wRn */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* M0 ^= wRn */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1430
/* Binary iwMMXt op: M0 = helper(M0, wRn), helper needs no env. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP but the helper also takes cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1455
/* Multiply/accumulate and sum-of-absolute-difference ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Pack/unpack, compare, min/max, saturating arithmetic, averages
 * (all of these may update flags, hence the env variants).
 */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1510
e677137d
PB
/* Set bit 1 of wCon (data registers were updated). */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1518
/* Set bit 0 of wCon (control registers were updated). */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1526
/* Compute N/Z flags from M0 (via helper) and store them into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1533
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1540
39d5492a
PM
/* Compute the effective address for an iwMMXt load/store insn into
 * 'dest', handling pre/post-indexed addressing and base writeback.
 * Returns nonzero for an invalid (undefined) encoding.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;      /* base register */
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 of the insn is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))     /* W bit: write back the new base */
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    /* NOTE(review): in the remaining case (P=0, W=0, U=1) this returns 0
     * without writing 'dest' or freeing 'tmp' — confirm against callers
     * whether that encoding can actually reach here.
     */
    return 0;
}
1575
/* Fetch an iwMMXt shift amount into 'dest', masked by 'mask': either
 * from a wCGRn control register (insn bit 8 set) or from the low half
 * of data register wRn. Returns nonzero for an invalid encoding.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            /* Only the four wCGR registers may supply a shift count. */
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1597
a1c7273b 1598/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1599 (ie. an undefined instruction). */
7dcc1f89 1600static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1601{
1602 int rd, wrd;
1603 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1604 TCGv_i32 addr;
1605 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1606
1607 if ((insn & 0x0e000e00) == 0x0c000000) {
1608 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1609 wrd = insn & 0xf;
1610 rdlo = (insn >> 12) & 0xf;
1611 rdhi = (insn >> 16) & 0xf;
1612 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1613 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1614 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1615 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1616 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1617 } else { /* TMCRR */
da6b5335
FN
1618 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1619 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1620 gen_op_iwmmxt_set_mup();
1621 }
1622 return 0;
1623 }
1624
1625 wrd = (insn >> 12) & 0xf;
7d1b0095 1626 addr = tcg_temp_new_i32();
da6b5335 1627 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1628 tcg_temp_free_i32(addr);
18c9b560 1629 return 1;
da6b5335 1630 }
18c9b560
AZ
1631 if (insn & ARM_CP_RW_BIT) {
1632 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1633 tmp = tcg_temp_new_i32();
6ce2faf4 1634 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1635 iwmmxt_store_creg(wrd, tmp);
18c9b560 1636 } else {
e677137d
PB
1637 i = 1;
1638 if (insn & (1 << 8)) {
1639 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1640 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1641 i = 0;
1642 } else { /* WLDRW wRd */
29531141 1643 tmp = tcg_temp_new_i32();
6ce2faf4 1644 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1645 }
1646 } else {
29531141 1647 tmp = tcg_temp_new_i32();
e677137d 1648 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1649 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1650 } else { /* WLDRB */
6ce2faf4 1651 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1652 }
1653 }
1654 if (i) {
1655 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1656 tcg_temp_free_i32(tmp);
e677137d 1657 }
18c9b560
AZ
1658 gen_op_iwmmxt_movq_wRn_M0(wrd);
1659 }
1660 } else {
1661 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1662 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1663 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1664 } else {
1665 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1666 tmp = tcg_temp_new_i32();
e677137d
PB
1667 if (insn & (1 << 8)) {
1668 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1669 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d 1670 } else { /* WSTRW wRd */
ecc7b3aa 1671 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1672 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1673 }
1674 } else {
1675 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1676 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1677 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d 1678 } else { /* WSTRB */
ecc7b3aa 1679 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1680 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1681 }
1682 }
18c9b560 1683 }
29531141 1684 tcg_temp_free_i32(tmp);
18c9b560 1685 }
7d1b0095 1686 tcg_temp_free_i32(addr);
18c9b560
AZ
1687 return 0;
1688 }
1689
1690 if ((insn & 0x0f000000) != 0x0e000000)
1691 return 1;
1692
1693 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1694 case 0x000: /* WOR */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 0) & 0xf;
1697 rd1 = (insn >> 16) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 gen_op_iwmmxt_orq_M0_wRn(rd1);
1700 gen_op_iwmmxt_setpsr_nz();
1701 gen_op_iwmmxt_movq_wRn_M0(wrd);
1702 gen_op_iwmmxt_set_mup();
1703 gen_op_iwmmxt_set_cup();
1704 break;
1705 case 0x011: /* TMCR */
1706 if (insn & 0xf)
1707 return 1;
1708 rd = (insn >> 12) & 0xf;
1709 wrd = (insn >> 16) & 0xf;
1710 switch (wrd) {
1711 case ARM_IWMMXT_wCID:
1712 case ARM_IWMMXT_wCASF:
1713 break;
1714 case ARM_IWMMXT_wCon:
1715 gen_op_iwmmxt_set_cup();
1716 /* Fall through. */
1717 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1718 tmp = iwmmxt_load_creg(wrd);
1719 tmp2 = load_reg(s, rd);
f669df27 1720 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1721 tcg_temp_free_i32(tmp2);
da6b5335 1722 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1723 break;
1724 case ARM_IWMMXT_wCGR0:
1725 case ARM_IWMMXT_wCGR1:
1726 case ARM_IWMMXT_wCGR2:
1727 case ARM_IWMMXT_wCGR3:
1728 gen_op_iwmmxt_set_cup();
da6b5335
FN
1729 tmp = load_reg(s, rd);
1730 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1731 break;
1732 default:
1733 return 1;
1734 }
1735 break;
1736 case 0x100: /* WXOR */
1737 wrd = (insn >> 12) & 0xf;
1738 rd0 = (insn >> 0) & 0xf;
1739 rd1 = (insn >> 16) & 0xf;
1740 gen_op_iwmmxt_movq_M0_wRn(rd0);
1741 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1742 gen_op_iwmmxt_setpsr_nz();
1743 gen_op_iwmmxt_movq_wRn_M0(wrd);
1744 gen_op_iwmmxt_set_mup();
1745 gen_op_iwmmxt_set_cup();
1746 break;
1747 case 0x111: /* TMRC */
1748 if (insn & 0xf)
1749 return 1;
1750 rd = (insn >> 12) & 0xf;
1751 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1752 tmp = iwmmxt_load_creg(wrd);
1753 store_reg(s, rd, tmp);
18c9b560
AZ
1754 break;
1755 case 0x300: /* WANDN */
1756 wrd = (insn >> 12) & 0xf;
1757 rd0 = (insn >> 0) & 0xf;
1758 rd1 = (insn >> 16) & 0xf;
1759 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1760 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1761 gen_op_iwmmxt_andq_M0_wRn(rd1);
1762 gen_op_iwmmxt_setpsr_nz();
1763 gen_op_iwmmxt_movq_wRn_M0(wrd);
1764 gen_op_iwmmxt_set_mup();
1765 gen_op_iwmmxt_set_cup();
1766 break;
1767 case 0x200: /* WAND */
1768 wrd = (insn >> 12) & 0xf;
1769 rd0 = (insn >> 0) & 0xf;
1770 rd1 = (insn >> 16) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0);
1772 gen_op_iwmmxt_andq_M0_wRn(rd1);
1773 gen_op_iwmmxt_setpsr_nz();
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x810: case 0xa10: /* WMADD */
1779 wrd = (insn >> 12) & 0xf;
1780 rd0 = (insn >> 0) & 0xf;
1781 rd1 = (insn >> 16) & 0xf;
1782 gen_op_iwmmxt_movq_M0_wRn(rd0);
1783 if (insn & (1 << 21))
1784 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1785 else
1786 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1787 gen_op_iwmmxt_movq_wRn_M0(wrd);
1788 gen_op_iwmmxt_set_mup();
1789 break;
1790 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1791 wrd = (insn >> 12) & 0xf;
1792 rd0 = (insn >> 16) & 0xf;
1793 rd1 = (insn >> 0) & 0xf;
1794 gen_op_iwmmxt_movq_M0_wRn(rd0);
1795 switch ((insn >> 22) & 3) {
1796 case 0:
1797 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1798 break;
1799 case 1:
1800 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1801 break;
1802 case 2:
1803 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1804 break;
1805 case 3:
1806 return 1;
1807 }
1808 gen_op_iwmmxt_movq_wRn_M0(wrd);
1809 gen_op_iwmmxt_set_mup();
1810 gen_op_iwmmxt_set_cup();
1811 break;
1812 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1813 wrd = (insn >> 12) & 0xf;
1814 rd0 = (insn >> 16) & 0xf;
1815 rd1 = (insn >> 0) & 0xf;
1816 gen_op_iwmmxt_movq_M0_wRn(rd0);
1817 switch ((insn >> 22) & 3) {
1818 case 0:
1819 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1820 break;
1821 case 1:
1822 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1823 break;
1824 case 2:
1825 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1826 break;
1827 case 3:
1828 return 1;
1829 }
1830 gen_op_iwmmxt_movq_wRn_M0(wrd);
1831 gen_op_iwmmxt_set_mup();
1832 gen_op_iwmmxt_set_cup();
1833 break;
1834 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1835 wrd = (insn >> 12) & 0xf;
1836 rd0 = (insn >> 16) & 0xf;
1837 rd1 = (insn >> 0) & 0xf;
1838 gen_op_iwmmxt_movq_M0_wRn(rd0);
1839 if (insn & (1 << 22))
1840 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1841 else
1842 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1843 if (!(insn & (1 << 20)))
1844 gen_op_iwmmxt_addl_M0_wRn(wrd);
1845 gen_op_iwmmxt_movq_wRn_M0(wrd);
1846 gen_op_iwmmxt_set_mup();
1847 break;
1848 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1849 wrd = (insn >> 12) & 0xf;
1850 rd0 = (insn >> 16) & 0xf;
1851 rd1 = (insn >> 0) & 0xf;
1852 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1853 if (insn & (1 << 21)) {
1854 if (insn & (1 << 20))
1855 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1856 else
1857 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1858 } else {
1859 if (insn & (1 << 20))
1860 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1861 else
1862 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1863 }
18c9b560
AZ
1864 gen_op_iwmmxt_movq_wRn_M0(wrd);
1865 gen_op_iwmmxt_set_mup();
1866 break;
1867 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1868 wrd = (insn >> 12) & 0xf;
1869 rd0 = (insn >> 16) & 0xf;
1870 rd1 = (insn >> 0) & 0xf;
1871 gen_op_iwmmxt_movq_M0_wRn(rd0);
1872 if (insn & (1 << 21))
1873 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1874 else
1875 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1876 if (!(insn & (1 << 20))) {
e677137d
PB
1877 iwmmxt_load_reg(cpu_V1, wrd);
1878 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1879 }
1880 gen_op_iwmmxt_movq_wRn_M0(wrd);
1881 gen_op_iwmmxt_set_mup();
1882 break;
1883 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1884 wrd = (insn >> 12) & 0xf;
1885 rd0 = (insn >> 16) & 0xf;
1886 rd1 = (insn >> 0) & 0xf;
1887 gen_op_iwmmxt_movq_M0_wRn(rd0);
1888 switch ((insn >> 22) & 3) {
1889 case 0:
1890 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1891 break;
1892 case 1:
1893 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1894 break;
1895 case 2:
1896 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1897 break;
1898 case 3:
1899 return 1;
1900 }
1901 gen_op_iwmmxt_movq_wRn_M0(wrd);
1902 gen_op_iwmmxt_set_mup();
1903 gen_op_iwmmxt_set_cup();
1904 break;
1905 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1906 wrd = (insn >> 12) & 0xf;
1907 rd0 = (insn >> 16) & 0xf;
1908 rd1 = (insn >> 0) & 0xf;
1909 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1910 if (insn & (1 << 22)) {
1911 if (insn & (1 << 20))
1912 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1913 else
1914 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1915 } else {
1916 if (insn & (1 << 20))
1917 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1918 else
1919 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1920 }
18c9b560
AZ
1921 gen_op_iwmmxt_movq_wRn_M0(wrd);
1922 gen_op_iwmmxt_set_mup();
1923 gen_op_iwmmxt_set_cup();
1924 break;
1925 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1926 wrd = (insn >> 12) & 0xf;
1927 rd0 = (insn >> 16) & 0xf;
1928 rd1 = (insn >> 0) & 0xf;
1929 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1930 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1931 tcg_gen_andi_i32(tmp, tmp, 7);
1932 iwmmxt_load_reg(cpu_V1, rd1);
1933 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1934 tcg_temp_free_i32(tmp);
18c9b560
AZ
1935 gen_op_iwmmxt_movq_wRn_M0(wrd);
1936 gen_op_iwmmxt_set_mup();
1937 break;
1938 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1939 if (((insn >> 6) & 3) == 3)
1940 return 1;
18c9b560
AZ
1941 rd = (insn >> 12) & 0xf;
1942 wrd = (insn >> 16) & 0xf;
da6b5335 1943 tmp = load_reg(s, rd);
18c9b560
AZ
1944 gen_op_iwmmxt_movq_M0_wRn(wrd);
1945 switch ((insn >> 6) & 3) {
1946 case 0:
da6b5335
FN
1947 tmp2 = tcg_const_i32(0xff);
1948 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1949 break;
1950 case 1:
da6b5335
FN
1951 tmp2 = tcg_const_i32(0xffff);
1952 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1953 break;
1954 case 2:
da6b5335
FN
1955 tmp2 = tcg_const_i32(0xffffffff);
1956 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1957 break;
da6b5335 1958 default:
39d5492a
PM
1959 TCGV_UNUSED_I32(tmp2);
1960 TCGV_UNUSED_I32(tmp3);
18c9b560 1961 }
da6b5335 1962 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1963 tcg_temp_free_i32(tmp3);
1964 tcg_temp_free_i32(tmp2);
7d1b0095 1965 tcg_temp_free_i32(tmp);
18c9b560
AZ
1966 gen_op_iwmmxt_movq_wRn_M0(wrd);
1967 gen_op_iwmmxt_set_mup();
1968 break;
1969 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1970 rd = (insn >> 12) & 0xf;
1971 wrd = (insn >> 16) & 0xf;
da6b5335 1972 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1973 return 1;
1974 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1975 tmp = tcg_temp_new_i32();
18c9b560
AZ
1976 switch ((insn >> 22) & 3) {
1977 case 0:
da6b5335 1978 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 1979 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1980 if (insn & 8) {
1981 tcg_gen_ext8s_i32(tmp, tmp);
1982 } else {
1983 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1984 }
1985 break;
1986 case 1:
da6b5335 1987 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 1988 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1989 if (insn & 8) {
1990 tcg_gen_ext16s_i32(tmp, tmp);
1991 } else {
1992 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1993 }
1994 break;
1995 case 2:
da6b5335 1996 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 1997 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 1998 break;
18c9b560 1999 }
da6b5335 2000 store_reg(s, rd, tmp);
18c9b560
AZ
2001 break;
2002 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2003 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2004 return 1;
da6b5335 2005 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2006 switch ((insn >> 22) & 3) {
2007 case 0:
da6b5335 2008 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2009 break;
2010 case 1:
da6b5335 2011 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2012 break;
2013 case 2:
da6b5335 2014 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2015 break;
18c9b560 2016 }
da6b5335
FN
2017 tcg_gen_shli_i32(tmp, tmp, 28);
2018 gen_set_nzcv(tmp);
7d1b0095 2019 tcg_temp_free_i32(tmp);
18c9b560
AZ
2020 break;
2021 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2022 if (((insn >> 6) & 3) == 3)
2023 return 1;
18c9b560
AZ
2024 rd = (insn >> 12) & 0xf;
2025 wrd = (insn >> 16) & 0xf;
da6b5335 2026 tmp = load_reg(s, rd);
18c9b560
AZ
2027 switch ((insn >> 6) & 3) {
2028 case 0:
da6b5335 2029 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2030 break;
2031 case 1:
da6b5335 2032 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2033 break;
2034 case 2:
da6b5335 2035 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2036 break;
18c9b560 2037 }
7d1b0095 2038 tcg_temp_free_i32(tmp);
18c9b560
AZ
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2043 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2044 return 1;
da6b5335 2045 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2046 tmp2 = tcg_temp_new_i32();
da6b5335 2047 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2048 switch ((insn >> 22) & 3) {
2049 case 0:
2050 for (i = 0; i < 7; i ++) {
da6b5335
FN
2051 tcg_gen_shli_i32(tmp2, tmp2, 4);
2052 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2053 }
2054 break;
2055 case 1:
2056 for (i = 0; i < 3; i ++) {
da6b5335
FN
2057 tcg_gen_shli_i32(tmp2, tmp2, 8);
2058 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2059 }
2060 break;
2061 case 2:
da6b5335
FN
2062 tcg_gen_shli_i32(tmp2, tmp2, 16);
2063 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2064 break;
18c9b560 2065 }
da6b5335 2066 gen_set_nzcv(tmp);
7d1b0095
PM
2067 tcg_temp_free_i32(tmp2);
2068 tcg_temp_free_i32(tmp);
18c9b560
AZ
2069 break;
2070 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2071 wrd = (insn >> 12) & 0xf;
2072 rd0 = (insn >> 16) & 0xf;
2073 gen_op_iwmmxt_movq_M0_wRn(rd0);
2074 switch ((insn >> 22) & 3) {
2075 case 0:
e677137d 2076 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2077 break;
2078 case 1:
e677137d 2079 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2080 break;
2081 case 2:
e677137d 2082 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2083 break;
2084 case 3:
2085 return 1;
2086 }
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 break;
2090 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2091 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2092 return 1;
da6b5335 2093 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2094 tmp2 = tcg_temp_new_i32();
da6b5335 2095 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2096 switch ((insn >> 22) & 3) {
2097 case 0:
2098 for (i = 0; i < 7; i ++) {
da6b5335
FN
2099 tcg_gen_shli_i32(tmp2, tmp2, 4);
2100 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2101 }
2102 break;
2103 case 1:
2104 for (i = 0; i < 3; i ++) {
da6b5335
FN
2105 tcg_gen_shli_i32(tmp2, tmp2, 8);
2106 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2107 }
2108 break;
2109 case 2:
da6b5335
FN
2110 tcg_gen_shli_i32(tmp2, tmp2, 16);
2111 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2112 break;
18c9b560 2113 }
da6b5335 2114 gen_set_nzcv(tmp);
7d1b0095
PM
2115 tcg_temp_free_i32(tmp2);
2116 tcg_temp_free_i32(tmp);
18c9b560
AZ
2117 break;
2118 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2119 rd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
da6b5335 2121 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2122 return 1;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2124 tmp = tcg_temp_new_i32();
18c9b560
AZ
2125 switch ((insn >> 22) & 3) {
2126 case 0:
da6b5335 2127 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2128 break;
2129 case 1:
da6b5335 2130 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2131 break;
2132 case 2:
da6b5335 2133 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2134 break;
18c9b560 2135 }
da6b5335 2136 store_reg(s, rd, tmp);
18c9b560
AZ
2137 break;
2138 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2139 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2140 wrd = (insn >> 12) & 0xf;
2141 rd0 = (insn >> 16) & 0xf;
2142 rd1 = (insn >> 0) & 0xf;
2143 gen_op_iwmmxt_movq_M0_wRn(rd0);
2144 switch ((insn >> 22) & 3) {
2145 case 0:
2146 if (insn & (1 << 21))
2147 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2148 else
2149 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2150 break;
2151 case 1:
2152 if (insn & (1 << 21))
2153 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2154 else
2155 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2156 break;
2157 case 2:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2160 else
2161 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2162 break;
2163 case 3:
2164 return 1;
2165 }
2166 gen_op_iwmmxt_movq_wRn_M0(wrd);
2167 gen_op_iwmmxt_set_mup();
2168 gen_op_iwmmxt_set_cup();
2169 break;
2170 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2171 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2172 wrd = (insn >> 12) & 0xf;
2173 rd0 = (insn >> 16) & 0xf;
2174 gen_op_iwmmxt_movq_M0_wRn(rd0);
2175 switch ((insn >> 22) & 3) {
2176 case 0:
2177 if (insn & (1 << 21))
2178 gen_op_iwmmxt_unpacklsb_M0();
2179 else
2180 gen_op_iwmmxt_unpacklub_M0();
2181 break;
2182 case 1:
2183 if (insn & (1 << 21))
2184 gen_op_iwmmxt_unpacklsw_M0();
2185 else
2186 gen_op_iwmmxt_unpackluw_M0();
2187 break;
2188 case 2:
2189 if (insn & (1 << 21))
2190 gen_op_iwmmxt_unpacklsl_M0();
2191 else
2192 gen_op_iwmmxt_unpacklul_M0();
2193 break;
2194 case 3:
2195 return 1;
2196 }
2197 gen_op_iwmmxt_movq_wRn_M0(wrd);
2198 gen_op_iwmmxt_set_mup();
2199 gen_op_iwmmxt_set_cup();
2200 break;
2201 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2202 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2203 wrd = (insn >> 12) & 0xf;
2204 rd0 = (insn >> 16) & 0xf;
2205 gen_op_iwmmxt_movq_M0_wRn(rd0);
2206 switch ((insn >> 22) & 3) {
2207 case 0:
2208 if (insn & (1 << 21))
2209 gen_op_iwmmxt_unpackhsb_M0();
2210 else
2211 gen_op_iwmmxt_unpackhub_M0();
2212 break;
2213 case 1:
2214 if (insn & (1 << 21))
2215 gen_op_iwmmxt_unpackhsw_M0();
2216 else
2217 gen_op_iwmmxt_unpackhuw_M0();
2218 break;
2219 case 2:
2220 if (insn & (1 << 21))
2221 gen_op_iwmmxt_unpackhsl_M0();
2222 else
2223 gen_op_iwmmxt_unpackhul_M0();
2224 break;
2225 case 3:
2226 return 1;
2227 }
2228 gen_op_iwmmxt_movq_wRn_M0(wrd);
2229 gen_op_iwmmxt_set_mup();
2230 gen_op_iwmmxt_set_cup();
2231 break;
2232 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2233 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2234 if (((insn >> 22) & 3) == 0)
2235 return 1;
18c9b560
AZ
2236 wrd = (insn >> 12) & 0xf;
2237 rd0 = (insn >> 16) & 0xf;
2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2239 tmp = tcg_temp_new_i32();
da6b5335 2240 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2241 tcg_temp_free_i32(tmp);
18c9b560 2242 return 1;
da6b5335 2243 }
18c9b560 2244 switch ((insn >> 22) & 3) {
18c9b560 2245 case 1:
477955bd 2246 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2247 break;
2248 case 2:
477955bd 2249 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2250 break;
2251 case 3:
477955bd 2252 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2253 break;
2254 }
7d1b0095 2255 tcg_temp_free_i32(tmp);
18c9b560
AZ
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2259 break;
2260 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2261 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2262 if (((insn >> 22) & 3) == 0)
2263 return 1;
18c9b560
AZ
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2267 tmp = tcg_temp_new_i32();
da6b5335 2268 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2269 tcg_temp_free_i32(tmp);
18c9b560 2270 return 1;
da6b5335 2271 }
18c9b560 2272 switch ((insn >> 22) & 3) {
18c9b560 2273 case 1:
477955bd 2274 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2275 break;
2276 case 2:
477955bd 2277 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2278 break;
2279 case 3:
477955bd 2280 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2281 break;
2282 }
7d1b0095 2283 tcg_temp_free_i32(tmp);
18c9b560
AZ
2284 gen_op_iwmmxt_movq_wRn_M0(wrd);
2285 gen_op_iwmmxt_set_mup();
2286 gen_op_iwmmxt_set_cup();
2287 break;
2288 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2289 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2290 if (((insn >> 22) & 3) == 0)
2291 return 1;
18c9b560
AZ
2292 wrd = (insn >> 12) & 0xf;
2293 rd0 = (insn >> 16) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2295 tmp = tcg_temp_new_i32();
da6b5335 2296 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2297 tcg_temp_free_i32(tmp);
18c9b560 2298 return 1;
da6b5335 2299 }
18c9b560 2300 switch ((insn >> 22) & 3) {
18c9b560 2301 case 1:
477955bd 2302 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2303 break;
2304 case 2:
477955bd 2305 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2306 break;
2307 case 3:
477955bd 2308 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2309 break;
2310 }
7d1b0095 2311 tcg_temp_free_i32(tmp);
18c9b560
AZ
2312 gen_op_iwmmxt_movq_wRn_M0(wrd);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2315 break;
2316 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2317 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2318 if (((insn >> 22) & 3) == 0)
2319 return 1;
18c9b560
AZ
2320 wrd = (insn >> 12) & 0xf;
2321 rd0 = (insn >> 16) & 0xf;
2322 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2323 tmp = tcg_temp_new_i32();
18c9b560 2324 switch ((insn >> 22) & 3) {
18c9b560 2325 case 1:
da6b5335 2326 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2327 tcg_temp_free_i32(tmp);
18c9b560 2328 return 1;
da6b5335 2329 }
477955bd 2330 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2331 break;
2332 case 2:
da6b5335 2333 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2334 tcg_temp_free_i32(tmp);
18c9b560 2335 return 1;
da6b5335 2336 }
477955bd 2337 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2338 break;
2339 case 3:
da6b5335 2340 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2341 tcg_temp_free_i32(tmp);
18c9b560 2342 return 1;
da6b5335 2343 }
477955bd 2344 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2345 break;
2346 }
7d1b0095 2347 tcg_temp_free_i32(tmp);
18c9b560
AZ
2348 gen_op_iwmmxt_movq_wRn_M0(wrd);
2349 gen_op_iwmmxt_set_mup();
2350 gen_op_iwmmxt_set_cup();
2351 break;
2352 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2353 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2354 wrd = (insn >> 12) & 0xf;
2355 rd0 = (insn >> 16) & 0xf;
2356 rd1 = (insn >> 0) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0);
2358 switch ((insn >> 22) & 3) {
2359 case 0:
2360 if (insn & (1 << 21))
2361 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2362 else
2363 gen_op_iwmmxt_minub_M0_wRn(rd1);
2364 break;
2365 case 1:
2366 if (insn & (1 << 21))
2367 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2368 else
2369 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2370 break;
2371 case 2:
2372 if (insn & (1 << 21))
2373 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2374 else
2375 gen_op_iwmmxt_minul_M0_wRn(rd1);
2376 break;
2377 case 3:
2378 return 1;
2379 }
2380 gen_op_iwmmxt_movq_wRn_M0(wrd);
2381 gen_op_iwmmxt_set_mup();
2382 break;
2383 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2384 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2385 wrd = (insn >> 12) & 0xf;
2386 rd0 = (insn >> 16) & 0xf;
2387 rd1 = (insn >> 0) & 0xf;
2388 gen_op_iwmmxt_movq_M0_wRn(rd0);
2389 switch ((insn >> 22) & 3) {
2390 case 0:
2391 if (insn & (1 << 21))
2392 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2393 else
2394 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2395 break;
2396 case 1:
2397 if (insn & (1 << 21))
2398 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2399 else
2400 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2401 break;
2402 case 2:
2403 if (insn & (1 << 21))
2404 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2405 else
2406 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2407 break;
2408 case 3:
2409 return 1;
2410 }
2411 gen_op_iwmmxt_movq_wRn_M0(wrd);
2412 gen_op_iwmmxt_set_mup();
2413 break;
2414 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2415 case 0x402: case 0x502: case 0x602: case 0x702:
2416 wrd = (insn >> 12) & 0xf;
2417 rd0 = (insn >> 16) & 0xf;
2418 rd1 = (insn >> 0) & 0xf;
2419 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2420 tmp = tcg_const_i32((insn >> 20) & 3);
2421 iwmmxt_load_reg(cpu_V1, rd1);
2422 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2423 tcg_temp_free_i32(tmp);
18c9b560
AZ
2424 gen_op_iwmmxt_movq_wRn_M0(wrd);
2425 gen_op_iwmmxt_set_mup();
2426 break;
2427 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2428 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2429 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2430 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2431 wrd = (insn >> 12) & 0xf;
2432 rd0 = (insn >> 16) & 0xf;
2433 rd1 = (insn >> 0) & 0xf;
2434 gen_op_iwmmxt_movq_M0_wRn(rd0);
2435 switch ((insn >> 20) & 0xf) {
2436 case 0x0:
2437 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2438 break;
2439 case 0x1:
2440 gen_op_iwmmxt_subub_M0_wRn(rd1);
2441 break;
2442 case 0x3:
2443 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2444 break;
2445 case 0x4:
2446 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2447 break;
2448 case 0x5:
2449 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2450 break;
2451 case 0x7:
2452 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2453 break;
2454 case 0x8:
2455 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2456 break;
2457 case 0x9:
2458 gen_op_iwmmxt_subul_M0_wRn(rd1);
2459 break;
2460 case 0xb:
2461 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2462 break;
2463 default:
2464 return 1;
2465 }
2466 gen_op_iwmmxt_movq_wRn_M0(wrd);
2467 gen_op_iwmmxt_set_mup();
2468 gen_op_iwmmxt_set_cup();
2469 break;
2470 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2471 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2472 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2473 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2474 wrd = (insn >> 12) & 0xf;
2475 rd0 = (insn >> 16) & 0xf;
2476 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2477 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2478 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2479 tcg_temp_free_i32(tmp);
18c9b560
AZ
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 gen_op_iwmmxt_set_cup();
2483 break;
2484 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2485 case 0x418: case 0x518: case 0x618: case 0x718:
2486 case 0x818: case 0x918: case 0xa18: case 0xb18:
2487 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2488 wrd = (insn >> 12) & 0xf;
2489 rd0 = (insn >> 16) & 0xf;
2490 rd1 = (insn >> 0) & 0xf;
2491 gen_op_iwmmxt_movq_M0_wRn(rd0);
2492 switch ((insn >> 20) & 0xf) {
2493 case 0x0:
2494 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2495 break;
2496 case 0x1:
2497 gen_op_iwmmxt_addub_M0_wRn(rd1);
2498 break;
2499 case 0x3:
2500 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2501 break;
2502 case 0x4:
2503 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2504 break;
2505 case 0x5:
2506 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2507 break;
2508 case 0x7:
2509 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2510 break;
2511 case 0x8:
2512 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2513 break;
2514 case 0x9:
2515 gen_op_iwmmxt_addul_M0_wRn(rd1);
2516 break;
2517 case 0xb:
2518 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2519 break;
2520 default:
2521 return 1;
2522 }
2523 gen_op_iwmmxt_movq_wRn_M0(wrd);
2524 gen_op_iwmmxt_set_mup();
2525 gen_op_iwmmxt_set_cup();
2526 break;
2527 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2528 case 0x408: case 0x508: case 0x608: case 0x708:
2529 case 0x808: case 0x908: case 0xa08: case 0xb08:
2530 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2531 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2532 return 1;
18c9b560
AZ
2533 wrd = (insn >> 12) & 0xf;
2534 rd0 = (insn >> 16) & 0xf;
2535 rd1 = (insn >> 0) & 0xf;
2536 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2537 switch ((insn >> 22) & 3) {
18c9b560
AZ
2538 case 1:
2539 if (insn & (1 << 21))
2540 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2541 else
2542 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2543 break;
2544 case 2:
2545 if (insn & (1 << 21))
2546 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2547 else
2548 gen_op_iwmmxt_packul_M0_wRn(rd1);
2549 break;
2550 case 3:
2551 if (insn & (1 << 21))
2552 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2553 else
2554 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2555 break;
2556 }
2557 gen_op_iwmmxt_movq_wRn_M0(wrd);
2558 gen_op_iwmmxt_set_mup();
2559 gen_op_iwmmxt_set_cup();
2560 break;
2561 case 0x201: case 0x203: case 0x205: case 0x207:
2562 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2563 case 0x211: case 0x213: case 0x215: case 0x217:
2564 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2565 wrd = (insn >> 5) & 0xf;
2566 rd0 = (insn >> 12) & 0xf;
2567 rd1 = (insn >> 0) & 0xf;
2568 if (rd0 == 0xf || rd1 == 0xf)
2569 return 1;
2570 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2571 tmp = load_reg(s, rd0);
2572 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2573 switch ((insn >> 16) & 0xf) {
2574 case 0x0: /* TMIA */
da6b5335 2575 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2576 break;
2577 case 0x8: /* TMIAPH */
da6b5335 2578 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2579 break;
2580 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2581 if (insn & (1 << 16))
da6b5335 2582 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2583 if (insn & (1 << 17))
da6b5335
FN
2584 tcg_gen_shri_i32(tmp2, tmp2, 16);
2585 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2586 break;
2587 default:
7d1b0095
PM
2588 tcg_temp_free_i32(tmp2);
2589 tcg_temp_free_i32(tmp);
18c9b560
AZ
2590 return 1;
2591 }
7d1b0095
PM
2592 tcg_temp_free_i32(tmp2);
2593 tcg_temp_free_i32(tmp);
18c9b560
AZ
2594 gen_op_iwmmxt_movq_wRn_M0(wrd);
2595 gen_op_iwmmxt_set_mup();
2596 break;
2597 default:
2598 return 1;
2599 }
2600
2601 return 0;
2602}
2603
a1c7273b 2604/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2605 (ie. an undefined instruction). */
7dcc1f89 2606static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2607{
2608 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2609 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2610
2611 if ((insn & 0x0ff00f10) == 0x0e200010) {
2612 /* Multiply with Internal Accumulate Format */
2613 rd0 = (insn >> 12) & 0xf;
2614 rd1 = insn & 0xf;
2615 acc = (insn >> 5) & 7;
2616
2617 if (acc != 0)
2618 return 1;
2619
3a554c0f
FN
2620 tmp = load_reg(s, rd0);
2621 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2622 switch ((insn >> 16) & 0xf) {
2623 case 0x0: /* MIA */
3a554c0f 2624 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2625 break;
2626 case 0x8: /* MIAPH */
3a554c0f 2627 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2628 break;
2629 case 0xc: /* MIABB */
2630 case 0xd: /* MIABT */
2631 case 0xe: /* MIATB */
2632 case 0xf: /* MIATT */
18c9b560 2633 if (insn & (1 << 16))
3a554c0f 2634 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2635 if (insn & (1 << 17))
3a554c0f
FN
2636 tcg_gen_shri_i32(tmp2, tmp2, 16);
2637 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2638 break;
2639 default:
2640 return 1;
2641 }
7d1b0095
PM
2642 tcg_temp_free_i32(tmp2);
2643 tcg_temp_free_i32(tmp);
18c9b560
AZ
2644
2645 gen_op_iwmmxt_movq_wRn_M0(acc);
2646 return 0;
2647 }
2648
2649 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2650 /* Internal Accumulator Access Format */
2651 rdhi = (insn >> 16) & 0xf;
2652 rdlo = (insn >> 12) & 0xf;
2653 acc = insn & 7;
2654
2655 if (acc != 0)
2656 return 1;
2657
2658 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2659 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2660 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2661 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2662 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2663 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2664 } else { /* MAR */
3a554c0f
FN
2665 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2666 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2667 }
2668 return 0;
2669 }
2670
2671 return 1;
2672}
2673
9ee6e8bb
PB
/* Shift right by n, where a negative n means shift left; lets the
   SREG/DREG extractors share one expression for both field layouts.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at
   "bigbit" supplies bits [4:1] and the bit at "smallbit" is the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into "reg".  With VFP3
   (D0-D31) the "smallbit" becomes register bit 4; without VFP3 the
   bit must be zero (D0-D15 only) and a set bit UNDEFs the insn --
   note the embedded "return 1", so this macro may exit the caller.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the D (dest), N and M operand fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2693
4373f3ce 2694/* Move between integer and VFP cores. */
39d5492a 2695static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2696{
39d5492a 2697 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2698 tcg_gen_mov_i32(tmp, cpu_F0s);
2699 return tmp;
2700}
2701
/* Copy tmp into the VFP scratch register F0s and free it.
 * Consumes tmp: the caller must not use it afterwards.
 */
static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
2707
39d5492a 2708static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2709{
39d5492a 2710 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2711 if (shift)
2712 tcg_gen_shri_i32(var, var, shift);
86831435 2713 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2714 tcg_gen_shli_i32(tmp, var, 8);
2715 tcg_gen_or_i32(var, var, tmp);
2716 tcg_gen_shli_i32(tmp, var, 16);
2717 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2718 tcg_temp_free_i32(tmp);
ad69471c
PB
2719}
2720
39d5492a 2721static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2722{
39d5492a 2723 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2724 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2725 tcg_gen_shli_i32(tmp, var, 16);
2726 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2727 tcg_temp_free_i32(tmp);
ad69471c
PB
2728}
2729
39d5492a 2730static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2731{
39d5492a 2732 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2733 tcg_gen_andi_i32(var, var, 0xffff0000);
2734 tcg_gen_shri_i32(tmp, var, 16);
2735 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2736 tcg_temp_free_i32(tmp);
ad69471c
PB
2737}
2738
/* Load one Neon element of the given size from *addr and replicate it
 * across all lanes of a 32-bit TCG register.
 * size: 0 = byte, 1 = halfword, 2 = word (a word already fills the reg).
 * Returns a fresh temp owned by the caller.
 */
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default: /* Avoid compiler warnings. */
        abort();
    }
    return tmp;
}
2760
04731fb5
WN
/* Emit code for the VSEL instruction: conditionally select between the
 * rn and rm VFP registers based on the current NZCV flags, writing the
 * result to rd.  cc (insn bits [21:20]) encodes eq/vs/ge/gt; dp selects
 * double vs single precision.  Always returns 0 (success).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the flag values for the 64-bit movcond: ZF is compared
         * for equality with zero so zero-extension suffices; NF and VF
         * carry their state in the sign bit, hence sign-extension.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: first apply the !Z test, then
             * overwrite dest with frm again if N != V.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be used
         * directly as movcond operands.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2869
40cfacdd
WN
/* Emit code for VMINNM/VMAXNM (the "number" min/max variants; NaN
 * handling is implemented by the vfp_minnum*/vfp_maxnum* helpers).
 * Insn bit 6 selects min (1) vs max (0); dp selects double precision.
 * Always returns 0 (success).
 */
static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
                            uint32_t rm, uint32_t dp)
{
    uint32_t vmin = extract32(insn, 6, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return 0;
}
2917
7655f39b
WN
/* Emit code for VRINT{A,N,P,M}: round a VFP value to an integral value
 * in floating-point format, using the statically chosen "rounding"
 * mode rather than the one currently in FPSCR.  Always returns 0.
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* Temporarily install the instruction's rounding mode. */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* NOTE(review): set_rmode is called again with the same temp;
     * presumably it swapped the old mode into tcg_rmode above, so this
     * second call restores the original rounding state -- confirm in
     * the helper implementation.
     */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
2955
c9975a83
WN
/* Emit code for VCVT{A,N,P,M}: float-to-integer conversion with the
 * rounding mode taken from the instruction instead of FPSCR.
 * Insn bit 7 selects a signed (1) vs unsigned (0) result; dp selects a
 * double-precision source.  The integer result always lands in a
 * single-precision destination register.  Always returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Fixed-point shift of zero: plain float-to-int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily install the instruction's rounding mode. */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* Narrow the 64-bit helper result to the 32-bit destination. */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* NOTE(review): second set_rmode call with the same temp --
     * presumably restores the previous rounding mode; confirm in the
     * helper implementation.
     */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3013
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by insn bits [17:16] of the VRINT/VCVT encodings below.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3024
/* Disassemble the ARMv8-only VFP instructions (VSEL, VMINNM/VMAXNM,
 * and the VRINT/VCVT variants with explicit rounding modes).
 * Returns nonzero if the insn is not one of these (i.e. UNDEF).
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Decode the three operand registers; note VFP_DREG_* may
     * "return 1" from here for pre-VFP3 encodings of D16-D31.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3058
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Handles, in order: the feature/FP-trap checks, the v8-only encodings,
   single and double register transfers (0xe with bit 4 set), VFP data
   processing including the short-vector looping (0xe with bit 4 clear),
   and the two-register-transfer / load-store group (0xc/0xd). */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Coprocessor 11 encodings (0xb00) are the double-precision forms. */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd==15: FMSTAT form, only the NZCV flags. */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR write can change vec_len/vec_stride:
                             * end the TB so they are re-read. */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector iteration count; compare/convert ops (op 15,
             * rn > 3) are always scalar. */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop once per short-vector element (once only for scalars). */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the VFP3 8-bit immediate into a full
                     * single/double constant (sign, biased exponent,
                     * 4-bit fraction). */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge the f16 result into the low half of Sd,
                         * preserving the top half. */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge the f16 result into the top half of Sd,
                         * preserving the low half. */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        /* Force round-to-zero, then restore the previous
                         * rounding mode (set_rmode swaps old/new). */
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3978
0a2461fa 3979static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3980{
6e256c93
FB
3981 TranslationBlock *tb;
3982
3983 tb = s->tb;
3984 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3985 tcg_gen_goto_tb(n);
eaed129d 3986 gen_set_pc_im(s, dest);
8cfd0495 3987 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3988 } else {
eaed129d 3989 gen_set_pc_im(s, dest);
57fec1fe 3990 tcg_gen_exit_tb(0);
6e256c93 3991 }
c53be334
FB
3992}
3993
8aaca4c0
FB
3994static inline void gen_jmp (DisasContext *s, uint32_t dest)
3995{
50225ad0 3996 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 3997 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3998 if (s->thumb)
d9ba4830
PB
3999 dest |= 1;
4000 gen_bx_im(s, dest);
8aaca4c0 4001 } else {
6e256c93 4002 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4003 s->is_jmp = DISAS_TB_JUMP;
4004 }
4005}
4006
39d5492a 4007static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4008{
ee097184 4009 if (x)
d9ba4830 4010 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4011 else
d9ba4830 4012 gen_sxth(t0);
ee097184 4013 if (y)
d9ba4830 4014 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4015 else
d9ba4830
PB
4016 gen_sxth(t1);
4017 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4018}
4019
4020/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4021static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4022{
b5ff1b31
FB
4023 uint32_t mask;
4024
4025 mask = 0;
4026 if (flags & (1 << 0))
4027 mask |= 0xff;
4028 if (flags & (1 << 1))
4029 mask |= 0xff00;
4030 if (flags & (1 << 2))
4031 mask |= 0xff0000;
4032 if (flags & (1 << 3))
4033 mask |= 0xff000000;
9ee6e8bb 4034
2ae23e75 4035 /* Mask out undefined bits. */
9ee6e8bb 4036 mask &= ~CPSR_RESERVED;
d614a513 4037 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4038 mask &= ~CPSR_T;
d614a513
PM
4039 }
4040 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4041 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4042 }
4043 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4044 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4045 }
4046 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4047 mask &= ~CPSR_IT;
d614a513 4048 }
4051e12c
PM
4049 /* Mask out execution state and reserved bits. */
4050 if (!spsr) {
4051 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4052 }
b5ff1b31
FB
4053 /* Mask out privileged bits. */
4054 if (IS_USER(s))
9ee6e8bb 4055 mask &= CPSR_USER;
b5ff1b31
FB
4056 return mask;
4057}
4058
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * Writes the bits of t0 selected by 'mask' into the CPSR, or into the
 * SPSR of the current mode when 'spsr' is nonzero.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* NOTE: load_cpu_field/store_cpu_field consume the token 'spsr'
         * as the CPUARMState field name here, not the int parameter of
         * the same name.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR updates can change translation-relevant state. */
    gen_lookup_tb(s);
    return 0;
}
4080
2fbac54b
FN
4081/* Returns nonzero if access to the PSR is not permitted. */
4082static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4083{
39d5492a 4084 TCGv_i32 tmp;
7d1b0095 4085 tmp = tcg_temp_new_i32();
2fbac54b
FN
4086 tcg_gen_movi_i32(tmp, val);
4087 return gen_set_psr(s, mask, spsr, tmp);
4088}
4089
/* Generate an old-style exception return. Marks pc as dead.
 * Writes 'pc' to r15 and restores the CPSR from the current SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, CPSR_ERET_MASK);
    tcg_temp_free_i32(tmp);
    /* CPSR has changed: force the main loop to re-evaluate state. */
    s->is_jmp = DISAS_JUMP;
}
4100
/* Generate a v6 exception return.  Marks both values as dead.
 * Writes 'cpsr' into the CPSR (exception-return mask) and 'pc' to r15.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4109
9ee6e8bb
PB
4110static void gen_nop_hint(DisasContext *s, int val)
4111{
4112 switch (val) {
c87e5a61
PM
4113 case 1: /* yield */
4114 gen_set_pc_im(s, s->pc);
4115 s->is_jmp = DISAS_YIELD;
4116 break;
9ee6e8bb 4117 case 3: /* wfi */
eaed129d 4118 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4119 s->is_jmp = DISAS_WFI;
4120 break;
4121 case 2: /* wfe */
72c1d3af
PM
4122 gen_set_pc_im(s, s->pc);
4123 s->is_jmp = DISAS_WFE;
4124 break;
9ee6e8bb 4125 case 4: /* sev */
12b10571
MR
4126 case 5: /* sevl */
4127 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4128 default: /* nop */
4129 break;
4130 }
4131}
99c475ab 4132
ad69471c 4133#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4134
39d5492a 4135static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4136{
4137 switch (size) {
dd8fbd78
FN
4138 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4139 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4140 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4141 default: abort();
9ee6e8bb 4142 }
9ee6e8bb
PB
4143}
4144
39d5492a 4145static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4146{
4147 switch (size) {
dd8fbd78
FN
4148 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4149 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4150 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4151 default: return;
4152 }
4153}
4154
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Invoke the env-taking helper gen_helper_neon_<name>_{s,u}{8,16,32},
 * selected by the enclosing 'size' and 'u' variables, on the enclosing
 * tmp/tmp2 temps.  On an unsupported size/u combination this makes the
 * *enclosing function* return 1 (i.e. signal UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4183
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
 * Makes the enclosing function return 1 (UNDEF) for a bad size/u combo.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4206
39d5492a 4207static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4208{
39d5492a 4209 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4210 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4211 return tmp;
9ee6e8bb
PB
4212}
4213
/* Store var into a CPUARMState scratch slot; consumes (frees) var. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4219
39d5492a 4220static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4221{
39d5492a 4222 TCGv_i32 tmp;
9ee6e8bb 4223 if (size == 1) {
0fad6efc
PM
4224 tmp = neon_load_reg(reg & 7, reg >> 4);
4225 if (reg & 8) {
dd8fbd78 4226 gen_neon_dup_high16(tmp);
0fad6efc
PM
4227 } else {
4228 gen_neon_dup_low16(tmp);
dd8fbd78 4229 }
0fad6efc
PM
4230 } else {
4231 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4232 }
dd8fbd78 4233 return tmp;
9ee6e8bb
PB
4234}
4235
02acedf9 4236static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4237{
39d5492a 4238 TCGv_i32 tmp, tmp2;
600b828c 4239 if (!q && size == 2) {
02acedf9
PM
4240 return 1;
4241 }
4242 tmp = tcg_const_i32(rd);
4243 tmp2 = tcg_const_i32(rm);
4244 if (q) {
4245 switch (size) {
4246 case 0:
02da0b2d 4247 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4248 break;
4249 case 1:
02da0b2d 4250 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4251 break;
4252 case 2:
02da0b2d 4253 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4254 break;
4255 default:
4256 abort();
4257 }
4258 } else {
4259 switch (size) {
4260 case 0:
02da0b2d 4261 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4262 break;
4263 case 1:
02da0b2d 4264 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4265 break;
4266 default:
4267 abort();
4268 }
4269 }
4270 tcg_temp_free_i32(tmp);
4271 tcg_temp_free_i32(tmp2);
4272 return 0;
19457615
FN
4273}
4274
d68a6f3a 4275static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4276{
39d5492a 4277 TCGv_i32 tmp, tmp2;
600b828c 4278 if (!q && size == 2) {
d68a6f3a
PM
4279 return 1;
4280 }
4281 tmp = tcg_const_i32(rd);
4282 tmp2 = tcg_const_i32(rm);
4283 if (q) {
4284 switch (size) {
4285 case 0:
02da0b2d 4286 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4287 break;
4288 case 1:
02da0b2d 4289 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4290 break;
4291 case 2:
02da0b2d 4292 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4293 break;
4294 default:
4295 abort();
4296 }
4297 } else {
4298 switch (size) {
4299 case 0:
02da0b2d 4300 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4301 break;
4302 case 1:
02da0b2d 4303 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4304 break;
4305 default:
4306 abort();
4307 }
4308 }
4309 tcg_temp_free_i32(tmp);
4310 tcg_temp_free_i32(tmp2);
4311 return 0;
19457615
FN
4312}
4313
39d5492a 4314static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4315{
39d5492a 4316 TCGv_i32 rd, tmp;
19457615 4317
7d1b0095
PM
4318 rd = tcg_temp_new_i32();
4319 tmp = tcg_temp_new_i32();
19457615
FN
4320
4321 tcg_gen_shli_i32(rd, t0, 8);
4322 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4323 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4324 tcg_gen_or_i32(rd, rd, tmp);
4325
4326 tcg_gen_shri_i32(t1, t1, 8);
4327 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4328 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4329 tcg_gen_or_i32(t1, t1, tmp);
4330 tcg_gen_mov_i32(t0, rd);
4331
7d1b0095
PM
4332 tcg_temp_free_i32(tmp);
4333 tcg_temp_free_i32(rd);
19457615
FN
4334}
4335
39d5492a 4336static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4337{
39d5492a 4338 TCGv_i32 rd, tmp;
19457615 4339
7d1b0095
PM
4340 rd = tcg_temp_new_i32();
4341 tmp = tcg_temp_new_i32();
19457615
FN
4342
4343 tcg_gen_shli_i32(rd, t0, 16);
4344 tcg_gen_andi_i32(tmp, t1, 0xffff);
4345 tcg_gen_or_i32(rd, rd, tmp);
4346 tcg_gen_shri_i32(t1, t1, 16);
4347 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4348 tcg_gen_or_i32(t1, t1, tmp);
4349 tcg_gen_mov_i32(t0, rd);
4350
7d1b0095
PM
4351 tcg_temp_free_i32(tmp);
4352 tcg_temp_free_i32(rd);
19457615
FN
4353}
4354
4355
9ee6e8bb
PB
/* Decode table for the NEON "load/store multiple structures" forms,
 * indexed by the insn 'op' field: number of registers transferred,
 * element interleave factor, and register spacing.
 * The table is read-only, so declare it const.
 */
static const struct {
    int nregs;      /* registers transferred per access group */
    int interleave; /* element interleave factor */
    int spacing;    /* register spacing (1 = consecutive, 2 = alternate) */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4373
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        /* Byte distance between consecutive accesses of one register. */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses, restart from the base address with
             * the appropriate per-register offset. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole Dreg per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes cover one 64-bit Dreg. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Assemble/split a 32-bit word from two halfwords. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Assemble/split a 32-bit word from four bytes. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the right lane of
                         * the existing register contents. */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Writeback: rm == 15 means none, rm == 13 means post-increment by the
     * transfer size, otherwise post-increment by register rm. */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4702
8f8e3aa4 4703/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4704static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4705{
4706 tcg_gen_and_i32(t, t, c);
f669df27 4707 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4708 tcg_gen_or_i32(dest, t, f);
4709}
4710
39d5492a 4711static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4712{
4713 switch (size) {
4714 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4715 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 4716 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
4717 default: abort();
4718 }
4719}
4720
39d5492a 4721static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4722{
4723 switch (size) {
02da0b2d
PM
4724 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4725 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4726 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4727 default: abort();
4728 }
4729}
4730
39d5492a 4731static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4732{
4733 switch (size) {
02da0b2d
PM
4734 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4735 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4736 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4737 default: abort();
4738 }
4739}
4740
39d5492a 4741static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4742{
4743 switch (size) {
02da0b2d
PM
4744 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4745 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4746 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4747 default: abort();
4748 }
4749}
4750
39d5492a 4751static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4752 int q, int u)
4753{
4754 if (q) {
4755 if (u) {
4756 switch (size) {
4757 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4758 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4759 default: abort();
4760 }
4761 } else {
4762 switch (size) {
4763 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4764 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4765 default: abort();
4766 }
4767 }
4768 } else {
4769 if (u) {
4770 switch (size) {
b408a9b0
CL
4771 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4772 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4773 default: abort();
4774 }
4775 } else {
4776 switch (size) {
4777 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4778 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4779 default: abort();
4780 }
4781 }
4782 }
4783}
4784
39d5492a 4785static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4786{
4787 if (u) {
4788 switch (size) {
4789 case 0: gen_helper_neon_widen_u8(dest, src); break;
4790 case 1: gen_helper_neon_widen_u16(dest, src); break;
4791 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4792 default: abort();
4793 }
4794 } else {
4795 switch (size) {
4796 case 0: gen_helper_neon_widen_s8(dest, src); break;
4797 case 1: gen_helper_neon_widen_s16(dest, src); break;
4798 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4799 default: abort();
4800 }
4801 }
7d1b0095 4802 tcg_temp_free_i32(src);
ad69471c
PB
4803}
4804
4805static inline void gen_neon_addl(int size)
4806{
4807 switch (size) {
4808 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4809 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4810 case 2: tcg_gen_add_i64(CPU_V001); break;
4811 default: abort();
4812 }
4813}
4814
4815static inline void gen_neon_subl(int size)
4816{
4817 switch (size) {
4818 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4819 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4820 case 2: tcg_gen_sub_i64(CPU_V001); break;
4821 default: abort();
4822 }
4823}
4824
a7812ae4 4825static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4826{
4827 switch (size) {
4828 case 0: gen_helper_neon_negl_u16(var, var); break;
4829 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4830 case 2:
4831 tcg_gen_neg_i64(var, var);
4832 break;
ad69471c
PB
4833 default: abort();
4834 }
4835}
4836
a7812ae4 4837static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4838{
4839 switch (size) {
02da0b2d
PM
4840 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4841 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4842 default: abort();
4843 }
4844}
4845
39d5492a
PM
4846static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4847 int size, int u)
ad69471c 4848{
a7812ae4 4849 TCGv_i64 tmp;
ad69471c
PB
4850
4851 switch ((size << 1) | u) {
4852 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4853 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4854 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4855 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4856 case 4:
4857 tmp = gen_muls_i64_i32(a, b);
4858 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4859 tcg_temp_free_i64(tmp);
ad69471c
PB
4860 break;
4861 case 5:
4862 tmp = gen_mulu_i64_i32(a, b);
4863 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4864 tcg_temp_free_i64(tmp);
ad69471c
PB
4865 break;
4866 default: abort();
4867 }
c6067f04
CL
4868
4869 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4870 Don't forget to clean them now. */
4871 if (size < 2) {
7d1b0095
PM
4872 tcg_temp_free_i32(a);
4873 tcg_temp_free_i32(b);
c6067f04 4874 }
ad69471c
PB
4875}
4876
39d5492a
PM
4877static void gen_neon_narrow_op(int op, int u, int size,
4878 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4879{
4880 if (op) {
4881 if (u) {
4882 gen_neon_unarrow_sats(size, dest, src);
4883 } else {
4884 gen_neon_narrow(size, dest, src);
4885 }
4886 } else {
4887 if (u) {
4888 gen_neon_narrow_satu(size, dest, src);
4889 } else {
4890 gen_neon_narrow_sats(size, dest, src);
4891 }
4892 }
4893}
4894
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n set means size value n is allowed for the op; unallocated op
 * values have no bits set and therefore always UNDEF. */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4966
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5033
5034static int neon_2rm_is_float_op(int op)
5035{
5036 /* Return true if this neon 2reg-misc op is float-to-float */
5037 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5038 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5039 op == NEON_2RM_VRINTM ||
5040 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5041 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5042}
5043
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5112
9ee6e8bb
PB
5113/* Translate a NEON data processing instruction. Return nonzero if the
5114 instruction is invalid.
ad69471c
PB
5115 We process data in a mixture of 32-bit and 64-bit chunks.
5116 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5117
7dcc1f89 5118static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5119{
5120 int op;
5121 int q;
5122 int rd, rn, rm;
5123 int size;
5124 int shift;
5125 int pass;
5126 int count;
5127 int pairwise;
5128 int u;
ca9a32e4 5129 uint32_t imm, mask;
39d5492a 5130 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5131 TCGv_i64 tmp64;
9ee6e8bb 5132
2c7ffc41
PM
5133 /* FIXME: this access check should not take precedence over UNDEF
5134 * for invalid encodings; we will generate incorrect syndrome information
5135 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5136 */
9dbbc748 5137 if (s->fp_excp_el) {
2c7ffc41 5138 gen_exception_insn(s, 4, EXCP_UDEF,
9dbbc748 5139 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
2c7ffc41
PM
5140 return 0;
5141 }
5142
5df8bac1 5143 if (!s->vfp_enabled)
9ee6e8bb
PB
5144 return 1;
5145 q = (insn & (1 << 6)) != 0;
5146 u = (insn >> 24) & 1;
5147 VFP_DREG_D(rd, insn);
5148 VFP_DREG_N(rn, insn);
5149 VFP_DREG_M(rm, insn);
5150 size = (insn >> 20) & 3;
5151 if ((insn & (1 << 23)) == 0) {
5152 /* Three register same length. */
5153 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5154 /* Catch invalid op and bad size combinations: UNDEF */
5155 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5156 return 1;
5157 }
25f84f79
PM
5158 /* All insns of this form UNDEF for either this condition or the
5159 * superset of cases "Q==1"; we catch the latter later.
5160 */
5161 if (q && ((rd | rn | rm) & 1)) {
5162 return 1;
5163 }
f1ecb913
AB
5164 /*
5165 * The SHA-1/SHA-256 3-register instructions require special treatment
5166 * here, as their size field is overloaded as an op type selector, and
5167 * they all consume their input in a single pass.
5168 */
5169 if (op == NEON_3R_SHA) {
5170 if (!q) {
5171 return 1;
5172 }
5173 if (!u) { /* SHA-1 */
d614a513 5174 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5175 return 1;
5176 }
5177 tmp = tcg_const_i32(rd);
5178 tmp2 = tcg_const_i32(rn);
5179 tmp3 = tcg_const_i32(rm);
5180 tmp4 = tcg_const_i32(size);
5181 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5182 tcg_temp_free_i32(tmp4);
5183 } else { /* SHA-256 */
d614a513 5184 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5185 return 1;
5186 }
5187 tmp = tcg_const_i32(rd);
5188 tmp2 = tcg_const_i32(rn);
5189 tmp3 = tcg_const_i32(rm);
5190 switch (size) {
5191 case 0:
5192 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5193 break;
5194 case 1:
5195 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5196 break;
5197 case 2:
5198 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5199 break;
5200 }
5201 }
5202 tcg_temp_free_i32(tmp);
5203 tcg_temp_free_i32(tmp2);
5204 tcg_temp_free_i32(tmp3);
5205 return 0;
5206 }
62698be3
PM
5207 if (size == 3 && op != NEON_3R_LOGIC) {
5208 /* 64-bit element instructions. */
9ee6e8bb 5209 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5210 neon_load_reg64(cpu_V0, rn + pass);
5211 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5212 switch (op) {
62698be3 5213 case NEON_3R_VQADD:
9ee6e8bb 5214 if (u) {
02da0b2d
PM
5215 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5216 cpu_V0, cpu_V1);
2c0262af 5217 } else {
02da0b2d
PM
5218 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5219 cpu_V0, cpu_V1);
2c0262af 5220 }
9ee6e8bb 5221 break;
62698be3 5222 case NEON_3R_VQSUB:
9ee6e8bb 5223 if (u) {
02da0b2d
PM
5224 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5225 cpu_V0, cpu_V1);
ad69471c 5226 } else {
02da0b2d
PM
5227 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5228 cpu_V0, cpu_V1);
ad69471c
PB
5229 }
5230 break;
62698be3 5231 case NEON_3R_VSHL:
ad69471c
PB
5232 if (u) {
5233 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5234 } else {
5235 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5236 }
5237 break;
62698be3 5238 case NEON_3R_VQSHL:
ad69471c 5239 if (u) {
02da0b2d
PM
5240 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5241 cpu_V1, cpu_V0);
ad69471c 5242 } else {
02da0b2d
PM
5243 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5244 cpu_V1, cpu_V0);
ad69471c
PB
5245 }
5246 break;
62698be3 5247 case NEON_3R_VRSHL:
ad69471c
PB
5248 if (u) {
5249 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5250 } else {
ad69471c
PB
5251 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5252 }
5253 break;
62698be3 5254 case NEON_3R_VQRSHL:
ad69471c 5255 if (u) {
02da0b2d
PM
5256 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5257 cpu_V1, cpu_V0);
ad69471c 5258 } else {
02da0b2d
PM
5259 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5260 cpu_V1, cpu_V0);
1e8d4eec 5261 }
9ee6e8bb 5262 break;
62698be3 5263 case NEON_3R_VADD_VSUB:
9ee6e8bb 5264 if (u) {
ad69471c 5265 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5266 } else {
ad69471c 5267 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5268 }
5269 break;
5270 default:
5271 abort();
2c0262af 5272 }
ad69471c 5273 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5274 }
9ee6e8bb 5275 return 0;
2c0262af 5276 }
25f84f79 5277 pairwise = 0;
9ee6e8bb 5278 switch (op) {
62698be3
PM
5279 case NEON_3R_VSHL:
5280 case NEON_3R_VQSHL:
5281 case NEON_3R_VRSHL:
5282 case NEON_3R_VQRSHL:
9ee6e8bb 5283 {
ad69471c
PB
5284 int rtmp;
5285 /* Shift instruction operands are reversed. */
5286 rtmp = rn;
9ee6e8bb 5287 rn = rm;
ad69471c 5288 rm = rtmp;
9ee6e8bb 5289 }
2c0262af 5290 break;
25f84f79
PM
5291 case NEON_3R_VPADD:
5292 if (u) {
5293 return 1;
5294 }
5295 /* Fall through */
62698be3
PM
5296 case NEON_3R_VPMAX:
5297 case NEON_3R_VPMIN:
9ee6e8bb 5298 pairwise = 1;
2c0262af 5299 break;
25f84f79
PM
5300 case NEON_3R_FLOAT_ARITH:
5301 pairwise = (u && size < 2); /* if VPADD (float) */
5302 break;
5303 case NEON_3R_FLOAT_MINMAX:
5304 pairwise = u; /* if VPMIN/VPMAX (float) */
5305 break;
5306 case NEON_3R_FLOAT_CMP:
5307 if (!u && size) {
5308 /* no encoding for U=0 C=1x */
5309 return 1;
5310 }
5311 break;
5312 case NEON_3R_FLOAT_ACMP:
5313 if (!u) {
5314 return 1;
5315 }
5316 break;
505935fc
WN
5317 case NEON_3R_FLOAT_MISC:
5318 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5319 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5320 return 1;
5321 }
2c0262af 5322 break;
25f84f79
PM
5323 case NEON_3R_VMUL:
5324 if (u && (size != 0)) {
5325 /* UNDEF on invalid size for polynomial subcase */
5326 return 1;
5327 }
2c0262af 5328 break;
da97f52c 5329 case NEON_3R_VFM:
d614a513 5330 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5331 return 1;
5332 }
5333 break;
9ee6e8bb 5334 default:
2c0262af 5335 break;
9ee6e8bb 5336 }
dd8fbd78 5337
25f84f79
PM
5338 if (pairwise && q) {
5339 /* All the pairwise insns UNDEF if Q is set */
5340 return 1;
5341 }
5342
9ee6e8bb
PB
5343 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5344
5345 if (pairwise) {
5346 /* Pairwise. */
a5a14945
JR
5347 if (pass < 1) {
5348 tmp = neon_load_reg(rn, 0);
5349 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5350 } else {
a5a14945
JR
5351 tmp = neon_load_reg(rm, 0);
5352 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5353 }
5354 } else {
5355 /* Elementwise. */
dd8fbd78
FN
5356 tmp = neon_load_reg(rn, pass);
5357 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5358 }
5359 switch (op) {
62698be3 5360 case NEON_3R_VHADD:
9ee6e8bb
PB
5361 GEN_NEON_INTEGER_OP(hadd);
5362 break;
62698be3 5363 case NEON_3R_VQADD:
02da0b2d 5364 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5365 break;
62698be3 5366 case NEON_3R_VRHADD:
9ee6e8bb 5367 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5368 break;
62698be3 5369 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5370 switch ((u << 2) | size) {
5371 case 0: /* VAND */
dd8fbd78 5372 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5373 break;
5374 case 1: /* BIC */
f669df27 5375 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5376 break;
5377 case 2: /* VORR */
dd8fbd78 5378 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5379 break;
5380 case 3: /* VORN */
f669df27 5381 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5382 break;
5383 case 4: /* VEOR */
dd8fbd78 5384 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5385 break;
5386 case 5: /* VBSL */
dd8fbd78
FN
5387 tmp3 = neon_load_reg(rd, pass);
5388 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5389 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5390 break;
5391 case 6: /* VBIT */
dd8fbd78
FN
5392 tmp3 = neon_load_reg(rd, pass);
5393 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5394 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5395 break;
5396 case 7: /* VBIF */
dd8fbd78
FN
5397 tmp3 = neon_load_reg(rd, pass);
5398 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5399 tcg_temp_free_i32(tmp3);
9ee6e8bb 5400 break;
2c0262af
FB
5401 }
5402 break;
62698be3 5403 case NEON_3R_VHSUB:
9ee6e8bb
PB
5404 GEN_NEON_INTEGER_OP(hsub);
5405 break;
62698be3 5406 case NEON_3R_VQSUB:
02da0b2d 5407 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5408 break;
62698be3 5409 case NEON_3R_VCGT:
9ee6e8bb
PB
5410 GEN_NEON_INTEGER_OP(cgt);
5411 break;
62698be3 5412 case NEON_3R_VCGE:
9ee6e8bb
PB
5413 GEN_NEON_INTEGER_OP(cge);
5414 break;
62698be3 5415 case NEON_3R_VSHL:
ad69471c 5416 GEN_NEON_INTEGER_OP(shl);
2c0262af 5417 break;
62698be3 5418 case NEON_3R_VQSHL:
02da0b2d 5419 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5420 break;
62698be3 5421 case NEON_3R_VRSHL:
ad69471c 5422 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5423 break;
62698be3 5424 case NEON_3R_VQRSHL:
02da0b2d 5425 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5426 break;
62698be3 5427 case NEON_3R_VMAX:
9ee6e8bb
PB
5428 GEN_NEON_INTEGER_OP(max);
5429 break;
62698be3 5430 case NEON_3R_VMIN:
9ee6e8bb
PB
5431 GEN_NEON_INTEGER_OP(min);
5432 break;
62698be3 5433 case NEON_3R_VABD:
9ee6e8bb
PB
5434 GEN_NEON_INTEGER_OP(abd);
5435 break;
62698be3 5436 case NEON_3R_VABA:
9ee6e8bb 5437 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5438 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5439 tmp2 = neon_load_reg(rd, pass);
5440 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5441 break;
62698be3 5442 case NEON_3R_VADD_VSUB:
9ee6e8bb 5443 if (!u) { /* VADD */
62698be3 5444 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5445 } else { /* VSUB */
5446 switch (size) {
dd8fbd78
FN
5447 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5448 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5449 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5450 default: abort();
9ee6e8bb
PB
5451 }
5452 }
5453 break;
62698be3 5454 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5455 if (!u) { /* VTST */
5456 switch (size) {
dd8fbd78
FN
5457 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5458 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5459 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5460 default: abort();
9ee6e8bb
PB
5461 }
5462 } else { /* VCEQ */
5463 switch (size) {
dd8fbd78
FN
5464 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5465 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5466 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5467 default: abort();
9ee6e8bb
PB
5468 }
5469 }
5470 break;
62698be3 5471 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5472 switch (size) {
dd8fbd78
FN
5473 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5474 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5475 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5476 default: abort();
9ee6e8bb 5477 }
7d1b0095 5478 tcg_temp_free_i32(tmp2);
dd8fbd78 5479 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5480 if (u) { /* VMLS */
dd8fbd78 5481 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5482 } else { /* VMLA */
dd8fbd78 5483 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5484 }
5485 break;
62698be3 5486 case NEON_3R_VMUL:
9ee6e8bb 5487 if (u) { /* polynomial */
dd8fbd78 5488 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5489 } else { /* Integer */
5490 switch (size) {
dd8fbd78
FN
5491 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5492 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5493 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5494 default: abort();
9ee6e8bb
PB
5495 }
5496 }
5497 break;
62698be3 5498 case NEON_3R_VPMAX:
9ee6e8bb
PB
5499 GEN_NEON_INTEGER_OP(pmax);
5500 break;
62698be3 5501 case NEON_3R_VPMIN:
9ee6e8bb
PB
5502 GEN_NEON_INTEGER_OP(pmin);
5503 break;
62698be3 5504 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5505 if (!u) { /* VQDMULH */
5506 switch (size) {
02da0b2d
PM
5507 case 1:
5508 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5509 break;
5510 case 2:
5511 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5512 break;
62698be3 5513 default: abort();
9ee6e8bb 5514 }
62698be3 5515 } else { /* VQRDMULH */
9ee6e8bb 5516 switch (size) {
02da0b2d
PM
5517 case 1:
5518 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5519 break;
5520 case 2:
5521 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5522 break;
62698be3 5523 default: abort();
9ee6e8bb
PB
5524 }
5525 }
5526 break;
62698be3 5527 case NEON_3R_VPADD:
9ee6e8bb 5528 switch (size) {
dd8fbd78
FN
5529 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5530 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5531 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5532 default: abort();
9ee6e8bb
PB
5533 }
5534 break;
62698be3 5535 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5536 {
5537 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5538 switch ((u << 2) | size) {
5539 case 0: /* VADD */
aa47cfdd
PM
5540 case 4: /* VPADD */
5541 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5542 break;
5543 case 2: /* VSUB */
aa47cfdd 5544 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5545 break;
5546 case 6: /* VABD */
aa47cfdd 5547 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5548 break;
5549 default:
62698be3 5550 abort();
9ee6e8bb 5551 }
aa47cfdd 5552 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5553 break;
aa47cfdd 5554 }
62698be3 5555 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5556 {
5557 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5558 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5559 if (!u) {
7d1b0095 5560 tcg_temp_free_i32(tmp2);
dd8fbd78 5561 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5562 if (size == 0) {
aa47cfdd 5563 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5564 } else {
aa47cfdd 5565 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5566 }
5567 }
aa47cfdd 5568 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5569 break;
aa47cfdd 5570 }
62698be3 5571 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5572 {
5573 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5574 if (!u) {
aa47cfdd 5575 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5576 } else {
aa47cfdd
PM
5577 if (size == 0) {
5578 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5579 } else {
5580 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5581 }
b5ff1b31 5582 }
aa47cfdd 5583 tcg_temp_free_ptr(fpstatus);
2c0262af 5584 break;
aa47cfdd 5585 }
62698be3 5586 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5587 {
5588 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5589 if (size == 0) {
5590 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5591 } else {
5592 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5593 }
5594 tcg_temp_free_ptr(fpstatus);
2c0262af 5595 break;
aa47cfdd 5596 }
62698be3 5597 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5598 {
5599 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5600 if (size == 0) {
f71a2ae5 5601 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5602 } else {
f71a2ae5 5603 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5604 }
5605 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5606 break;
aa47cfdd 5607 }
505935fc
WN
5608 case NEON_3R_FLOAT_MISC:
5609 if (u) {
5610 /* VMAXNM/VMINNM */
5611 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5612 if (size == 0) {
f71a2ae5 5613 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5614 } else {
f71a2ae5 5615 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5616 }
5617 tcg_temp_free_ptr(fpstatus);
5618 } else {
5619 if (size == 0) {
5620 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5621 } else {
5622 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5623 }
5624 }
2c0262af 5625 break;
da97f52c
PM
5626 case NEON_3R_VFM:
5627 {
5628 /* VFMA, VFMS: fused multiply-add */
5629 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5630 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5631 if (size) {
5632 /* VFMS */
5633 gen_helper_vfp_negs(tmp, tmp);
5634 }
5635 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5636 tcg_temp_free_i32(tmp3);
5637 tcg_temp_free_ptr(fpstatus);
5638 break;
5639 }
9ee6e8bb
PB
5640 default:
5641 abort();
2c0262af 5642 }
7d1b0095 5643 tcg_temp_free_i32(tmp2);
dd8fbd78 5644
9ee6e8bb
PB
5645 /* Save the result. For elementwise operations we can put it
5646 straight into the destination register. For pairwise operations
5647 we have to be careful to avoid clobbering the source operands. */
5648 if (pairwise && rd == rm) {
dd8fbd78 5649 neon_store_scratch(pass, tmp);
9ee6e8bb 5650 } else {
dd8fbd78 5651 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5652 }
5653
5654 } /* for pass */
5655 if (pairwise && rd == rm) {
5656 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5657 tmp = neon_load_scratch(pass);
5658 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5659 }
5660 }
ad69471c 5661 /* End of 3 register same size operations. */
9ee6e8bb
PB
5662 } else if (insn & (1 << 4)) {
5663 if ((insn & 0x00380080) != 0) {
5664 /* Two registers and shift. */
5665 op = (insn >> 8) & 0xf;
5666 if (insn & (1 << 7)) {
cc13115b
PM
5667 /* 64-bit shift. */
5668 if (op > 7) {
5669 return 1;
5670 }
9ee6e8bb
PB
5671 size = 3;
5672 } else {
5673 size = 2;
5674 while ((insn & (1 << (size + 19))) == 0)
5675 size--;
5676 }
5677 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5678 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5679 by immediate using the variable shift operations. */
5680 if (op < 8) {
5681 /* Shift by immediate:
5682 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5683 if (q && ((rd | rm) & 1)) {
5684 return 1;
5685 }
5686 if (!u && (op == 4 || op == 6)) {
5687 return 1;
5688 }
9ee6e8bb
PB
5689 /* Right shifts are encoded as N - shift, where N is the
5690 element size in bits. */
5691 if (op <= 4)
5692 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5693 if (size == 3) {
5694 count = q + 1;
5695 } else {
5696 count = q ? 4: 2;
5697 }
5698 switch (size) {
5699 case 0:
5700 imm = (uint8_t) shift;
5701 imm |= imm << 8;
5702 imm |= imm << 16;
5703 break;
5704 case 1:
5705 imm = (uint16_t) shift;
5706 imm |= imm << 16;
5707 break;
5708 case 2:
5709 case 3:
5710 imm = shift;
5711 break;
5712 default:
5713 abort();
5714 }
5715
5716 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5717 if (size == 3) {
5718 neon_load_reg64(cpu_V0, rm + pass);
5719 tcg_gen_movi_i64(cpu_V1, imm);
5720 switch (op) {
5721 case 0: /* VSHR */
5722 case 1: /* VSRA */
5723 if (u)
5724 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5725 else
ad69471c 5726 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5727 break;
ad69471c
PB
5728 case 2: /* VRSHR */
5729 case 3: /* VRSRA */
5730 if (u)
5731 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5732 else
ad69471c 5733 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5734 break;
ad69471c 5735 case 4: /* VSRI */
ad69471c
PB
5736 case 5: /* VSHL, VSLI */
5737 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5738 break;
0322b26e 5739 case 6: /* VQSHLU */
02da0b2d
PM
5740 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5741 cpu_V0, cpu_V1);
ad69471c 5742 break;
0322b26e
PM
5743 case 7: /* VQSHL */
5744 if (u) {
02da0b2d 5745 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5746 cpu_V0, cpu_V1);
5747 } else {
02da0b2d 5748 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5749 cpu_V0, cpu_V1);
5750 }
9ee6e8bb 5751 break;
9ee6e8bb 5752 }
ad69471c
PB
5753 if (op == 1 || op == 3) {
5754 /* Accumulate. */
5371cb81 5755 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5756 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5757 } else if (op == 4 || (op == 5 && u)) {
5758 /* Insert */
923e6509
CL
5759 neon_load_reg64(cpu_V1, rd + pass);
5760 uint64_t mask;
5761 if (shift < -63 || shift > 63) {
5762 mask = 0;
5763 } else {
5764 if (op == 4) {
5765 mask = 0xffffffffffffffffull >> -shift;
5766 } else {
5767 mask = 0xffffffffffffffffull << shift;
5768 }
5769 }
5770 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5771 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5772 }
5773 neon_store_reg64(cpu_V0, rd + pass);
5774 } else { /* size < 3 */
5775 /* Operands in T0 and T1. */
dd8fbd78 5776 tmp = neon_load_reg(rm, pass);
7d1b0095 5777 tmp2 = tcg_temp_new_i32();
dd8fbd78 5778 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5779 switch (op) {
5780 case 0: /* VSHR */
5781 case 1: /* VSRA */
5782 GEN_NEON_INTEGER_OP(shl);
5783 break;
5784 case 2: /* VRSHR */
5785 case 3: /* VRSRA */
5786 GEN_NEON_INTEGER_OP(rshl);
5787 break;
5788 case 4: /* VSRI */
ad69471c
PB
5789 case 5: /* VSHL, VSLI */
5790 switch (size) {
dd8fbd78
FN
5791 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5792 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5793 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5794 default: abort();
ad69471c
PB
5795 }
5796 break;
0322b26e 5797 case 6: /* VQSHLU */
ad69471c 5798 switch (size) {
0322b26e 5799 case 0:
02da0b2d
PM
5800 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5801 tmp, tmp2);
0322b26e
PM
5802 break;
5803 case 1:
02da0b2d
PM
5804 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5805 tmp, tmp2);
0322b26e
PM
5806 break;
5807 case 2:
02da0b2d
PM
5808 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5809 tmp, tmp2);
0322b26e
PM
5810 break;
5811 default:
cc13115b 5812 abort();
ad69471c
PB
5813 }
5814 break;
0322b26e 5815 case 7: /* VQSHL */
02da0b2d 5816 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5817 break;
ad69471c 5818 }
7d1b0095 5819 tcg_temp_free_i32(tmp2);
ad69471c
PB
5820
5821 if (op == 1 || op == 3) {
5822 /* Accumulate. */
dd8fbd78 5823 tmp2 = neon_load_reg(rd, pass);
5371cb81 5824 gen_neon_add(size, tmp, tmp2);
7d1b0095 5825 tcg_temp_free_i32(tmp2);
ad69471c
PB
5826 } else if (op == 4 || (op == 5 && u)) {
5827 /* Insert */
5828 switch (size) {
5829 case 0:
5830 if (op == 4)
ca9a32e4 5831 mask = 0xff >> -shift;
ad69471c 5832 else
ca9a32e4
JR
5833 mask = (uint8_t)(0xff << shift);
5834 mask |= mask << 8;
5835 mask |= mask << 16;
ad69471c
PB
5836 break;
5837 case 1:
5838 if (op == 4)
ca9a32e4 5839 mask = 0xffff >> -shift;
ad69471c 5840 else
ca9a32e4
JR
5841 mask = (uint16_t)(0xffff << shift);
5842 mask |= mask << 16;
ad69471c
PB
5843 break;
5844 case 2:
ca9a32e4
JR
5845 if (shift < -31 || shift > 31) {
5846 mask = 0;
5847 } else {
5848 if (op == 4)
5849 mask = 0xffffffffu >> -shift;
5850 else
5851 mask = 0xffffffffu << shift;
5852 }
ad69471c
PB
5853 break;
5854 default:
5855 abort();
5856 }
dd8fbd78 5857 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5858 tcg_gen_andi_i32(tmp, tmp, mask);
5859 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5860 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5861 tcg_temp_free_i32(tmp2);
ad69471c 5862 }
dd8fbd78 5863 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5864 }
5865 } /* for pass */
5866 } else if (op < 10) {
ad69471c 5867 /* Shift by immediate and narrow:
9ee6e8bb 5868 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5869 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5870 if (rm & 1) {
5871 return 1;
5872 }
9ee6e8bb
PB
5873 shift = shift - (1 << (size + 3));
5874 size++;
92cdfaeb 5875 if (size == 3) {
a7812ae4 5876 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5877 neon_load_reg64(cpu_V0, rm);
5878 neon_load_reg64(cpu_V1, rm + 1);
5879 for (pass = 0; pass < 2; pass++) {
5880 TCGv_i64 in;
5881 if (pass == 0) {
5882 in = cpu_V0;
5883 } else {
5884 in = cpu_V1;
5885 }
ad69471c 5886 if (q) {
0b36f4cd 5887 if (input_unsigned) {
92cdfaeb 5888 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5889 } else {
92cdfaeb 5890 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5891 }
ad69471c 5892 } else {
0b36f4cd 5893 if (input_unsigned) {
92cdfaeb 5894 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5895 } else {
92cdfaeb 5896 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5897 }
ad69471c 5898 }
7d1b0095 5899 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5900 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5901 neon_store_reg(rd, pass, tmp);
5902 } /* for pass */
5903 tcg_temp_free_i64(tmp64);
5904 } else {
5905 if (size == 1) {
5906 imm = (uint16_t)shift;
5907 imm |= imm << 16;
2c0262af 5908 } else {
92cdfaeb
PM
5909 /* size == 2 */
5910 imm = (uint32_t)shift;
5911 }
5912 tmp2 = tcg_const_i32(imm);
5913 tmp4 = neon_load_reg(rm + 1, 0);
5914 tmp5 = neon_load_reg(rm + 1, 1);
5915 for (pass = 0; pass < 2; pass++) {
5916 if (pass == 0) {
5917 tmp = neon_load_reg(rm, 0);
5918 } else {
5919 tmp = tmp4;
5920 }
0b36f4cd
CL
5921 gen_neon_shift_narrow(size, tmp, tmp2, q,
5922 input_unsigned);
92cdfaeb
PM
5923 if (pass == 0) {
5924 tmp3 = neon_load_reg(rm, 1);
5925 } else {
5926 tmp3 = tmp5;
5927 }
0b36f4cd
CL
5928 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5929 input_unsigned);
36aa55dc 5930 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5931 tcg_temp_free_i32(tmp);
5932 tcg_temp_free_i32(tmp3);
5933 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5934 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5935 neon_store_reg(rd, pass, tmp);
5936 } /* for pass */
c6067f04 5937 tcg_temp_free_i32(tmp2);
b75263d6 5938 }
9ee6e8bb 5939 } else if (op == 10) {
cc13115b
PM
5940 /* VSHLL, VMOVL */
5941 if (q || (rd & 1)) {
9ee6e8bb 5942 return 1;
cc13115b 5943 }
ad69471c
PB
5944 tmp = neon_load_reg(rm, 0);
5945 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5946 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5947 if (pass == 1)
5948 tmp = tmp2;
5949
5950 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5951
9ee6e8bb
PB
5952 if (shift != 0) {
5953 /* The shift is less than the width of the source
ad69471c
PB
5954 type, so we can just shift the whole register. */
5955 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5956 /* Widen the result of shift: we need to clear
5957 * the potential overflow bits resulting from
5958 * left bits of the narrow input appearing as
5959 * right bits of left the neighbour narrow
5960 * input. */
ad69471c
PB
5961 if (size < 2 || !u) {
5962 uint64_t imm64;
5963 if (size == 0) {
5964 imm = (0xffu >> (8 - shift));
5965 imm |= imm << 16;
acdf01ef 5966 } else if (size == 1) {
ad69471c 5967 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5968 } else {
5969 /* size == 2 */
5970 imm = 0xffffffff >> (32 - shift);
5971 }
5972 if (size < 2) {
5973 imm64 = imm | (((uint64_t)imm) << 32);
5974 } else {
5975 imm64 = imm;
9ee6e8bb 5976 }
acdf01ef 5977 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5978 }
5979 }
ad69471c 5980 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5981 }
f73534a5 5982 } else if (op >= 14) {
9ee6e8bb 5983 /* VCVT fixed-point. */
cc13115b
PM
5984 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5985 return 1;
5986 }
f73534a5
PM
5987 /* We have already masked out the must-be-1 top bit of imm6,
5988 * hence this 32-shift where the ARM ARM has 64-imm6.
5989 */
5990 shift = 32 - shift;
9ee6e8bb 5991 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5992 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5993 if (!(op & 1)) {
9ee6e8bb 5994 if (u)
5500b06c 5995 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5996 else
5500b06c 5997 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5998 } else {
5999 if (u)
5500b06c 6000 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6001 else
5500b06c 6002 gen_vfp_tosl(0, shift, 1);
2c0262af 6003 }
4373f3ce 6004 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6005 }
6006 } else {
9ee6e8bb
PB
6007 return 1;
6008 }
6009 } else { /* (insn & 0x00380080) == 0 */
6010 int invert;
7d80fee5
PM
6011 if (q && (rd & 1)) {
6012 return 1;
6013 }
9ee6e8bb
PB
6014
6015 op = (insn >> 8) & 0xf;
6016 /* One register and immediate. */
6017 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6018 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6019 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6020 * We choose to not special-case this and will behave as if a
6021 * valid constant encoding of 0 had been given.
6022 */
9ee6e8bb
PB
6023 switch (op) {
6024 case 0: case 1:
6025 /* no-op */
6026 break;
6027 case 2: case 3:
6028 imm <<= 8;
6029 break;
6030 case 4: case 5:
6031 imm <<= 16;
6032 break;
6033 case 6: case 7:
6034 imm <<= 24;
6035 break;
6036 case 8: case 9:
6037 imm |= imm << 16;
6038 break;
6039 case 10: case 11:
6040 imm = (imm << 8) | (imm << 24);
6041 break;
6042 case 12:
8e31209e 6043 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6044 break;
6045 case 13:
6046 imm = (imm << 16) | 0xffff;
6047 break;
6048 case 14:
6049 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6050 if (invert)
6051 imm = ~imm;
6052 break;
6053 case 15:
7d80fee5
PM
6054 if (invert) {
6055 return 1;
6056 }
9ee6e8bb
PB
6057 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6058 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6059 break;
6060 }
6061 if (invert)
6062 imm = ~imm;
6063
9ee6e8bb
PB
6064 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6065 if (op & 1 && op < 12) {
ad69471c 6066 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6067 if (invert) {
6068 /* The immediate value has already been inverted, so
6069 BIC becomes AND. */
ad69471c 6070 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6071 } else {
ad69471c 6072 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6073 }
9ee6e8bb 6074 } else {
ad69471c 6075 /* VMOV, VMVN. */
7d1b0095 6076 tmp = tcg_temp_new_i32();
9ee6e8bb 6077 if (op == 14 && invert) {
a5a14945 6078 int n;
ad69471c
PB
6079 uint32_t val;
6080 val = 0;
9ee6e8bb
PB
6081 for (n = 0; n < 4; n++) {
6082 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6083 val |= 0xff << (n * 8);
9ee6e8bb 6084 }
ad69471c
PB
6085 tcg_gen_movi_i32(tmp, val);
6086 } else {
6087 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6088 }
9ee6e8bb 6089 }
ad69471c 6090 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6091 }
6092 }
e4b3861d 6093 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6094 if (size != 3) {
6095 op = (insn >> 8) & 0xf;
6096 if ((insn & (1 << 6)) == 0) {
6097 /* Three registers of different lengths. */
6098 int src1_wide;
6099 int src2_wide;
6100 int prewiden;
526d0096
PM
6101 /* undefreq: bit 0 : UNDEF if size == 0
6102 * bit 1 : UNDEF if size == 1
6103 * bit 2 : UNDEF if size == 2
6104 * bit 3 : UNDEF if U == 1
6105 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6106 */
6107 int undefreq;
6108 /* prewiden, src1_wide, src2_wide, undefreq */
6109 static const int neon_3reg_wide[16][4] = {
6110 {1, 0, 0, 0}, /* VADDL */
6111 {1, 1, 0, 0}, /* VADDW */
6112 {1, 0, 0, 0}, /* VSUBL */
6113 {1, 1, 0, 0}, /* VSUBW */
6114 {0, 1, 1, 0}, /* VADDHN */
6115 {0, 0, 0, 0}, /* VABAL */
6116 {0, 1, 1, 0}, /* VSUBHN */
6117 {0, 0, 0, 0}, /* VABDL */
6118 {0, 0, 0, 0}, /* VMLAL */
526d0096 6119 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6120 {0, 0, 0, 0}, /* VMLSL */
526d0096 6121 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6122 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6123 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6124 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6125 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6126 };
6127
6128 prewiden = neon_3reg_wide[op][0];
6129 src1_wide = neon_3reg_wide[op][1];
6130 src2_wide = neon_3reg_wide[op][2];
695272dc 6131 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6132
526d0096
PM
6133 if ((undefreq & (1 << size)) ||
6134 ((undefreq & 8) && u)) {
695272dc
PM
6135 return 1;
6136 }
6137 if ((src1_wide && (rn & 1)) ||
6138 (src2_wide && (rm & 1)) ||
6139 (!src2_wide && (rd & 1))) {
ad69471c 6140 return 1;
695272dc 6141 }
ad69471c 6142
4e624eda
PM
6143 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6144 * outside the loop below as it only performs a single pass.
6145 */
6146 if (op == 14 && size == 2) {
6147 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6148
d614a513 6149 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6150 return 1;
6151 }
6152 tcg_rn = tcg_temp_new_i64();
6153 tcg_rm = tcg_temp_new_i64();
6154 tcg_rd = tcg_temp_new_i64();
6155 neon_load_reg64(tcg_rn, rn);
6156 neon_load_reg64(tcg_rm, rm);
6157 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6158 neon_store_reg64(tcg_rd, rd);
6159 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6160 neon_store_reg64(tcg_rd, rd + 1);
6161 tcg_temp_free_i64(tcg_rn);
6162 tcg_temp_free_i64(tcg_rm);
6163 tcg_temp_free_i64(tcg_rd);
6164 return 0;
6165 }
6166
9ee6e8bb
PB
6167 /* Avoid overlapping operands. Wide source operands are
6168 always aligned so will never overlap with wide
6169 destinations in problematic ways. */
8f8e3aa4 6170 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6171 tmp = neon_load_reg(rm, 1);
6172 neon_store_scratch(2, tmp);
8f8e3aa4 6173 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6174 tmp = neon_load_reg(rn, 1);
6175 neon_store_scratch(2, tmp);
9ee6e8bb 6176 }
39d5492a 6177 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6178 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6179 if (src1_wide) {
6180 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6181 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6182 } else {
ad69471c 6183 if (pass == 1 && rd == rn) {
dd8fbd78 6184 tmp = neon_load_scratch(2);
9ee6e8bb 6185 } else {
ad69471c
PB
6186 tmp = neon_load_reg(rn, pass);
6187 }
6188 if (prewiden) {
6189 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6190 }
6191 }
ad69471c
PB
6192 if (src2_wide) {
6193 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6194 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6195 } else {
ad69471c 6196 if (pass == 1 && rd == rm) {
dd8fbd78 6197 tmp2 = neon_load_scratch(2);
9ee6e8bb 6198 } else {
ad69471c
PB
6199 tmp2 = neon_load_reg(rm, pass);
6200 }
6201 if (prewiden) {
6202 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6203 }
9ee6e8bb
PB
6204 }
6205 switch (op) {
6206 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6207 gen_neon_addl(size);
9ee6e8bb 6208 break;
79b0e534 6209 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6210 gen_neon_subl(size);
9ee6e8bb
PB
6211 break;
6212 case 5: case 7: /* VABAL, VABDL */
6213 switch ((size << 1) | u) {
ad69471c
PB
6214 case 0:
6215 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6216 break;
6217 case 1:
6218 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6219 break;
6220 case 2:
6221 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6222 break;
6223 case 3:
6224 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6225 break;
6226 case 4:
6227 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6228 break;
6229 case 5:
6230 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6231 break;
9ee6e8bb
PB
6232 default: abort();
6233 }
7d1b0095
PM
6234 tcg_temp_free_i32(tmp2);
6235 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6236 break;
6237 case 8: case 9: case 10: case 11: case 12: case 13:
6238 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6239 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6240 break;
6241 case 14: /* Polynomial VMULL */
e5ca24cb 6242 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6243 tcg_temp_free_i32(tmp2);
6244 tcg_temp_free_i32(tmp);
e5ca24cb 6245 break;
695272dc
PM
6246 default: /* 15 is RESERVED: caught earlier */
6247 abort();
9ee6e8bb 6248 }
ebcd88ce
PM
6249 if (op == 13) {
6250 /* VQDMULL */
6251 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6252 neon_store_reg64(cpu_V0, rd + pass);
6253 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6254 /* Accumulate. */
ebcd88ce 6255 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6256 switch (op) {
4dc064e6
PM
6257 case 10: /* VMLSL */
6258 gen_neon_negl(cpu_V0, size);
6259 /* Fall through */
6260 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6261 gen_neon_addl(size);
9ee6e8bb
PB
6262 break;
6263 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6264 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6265 if (op == 11) {
6266 gen_neon_negl(cpu_V0, size);
6267 }
ad69471c
PB
6268 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6269 break;
9ee6e8bb
PB
6270 default:
6271 abort();
6272 }
ad69471c 6273 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6274 } else if (op == 4 || op == 6) {
6275 /* Narrowing operation. */
7d1b0095 6276 tmp = tcg_temp_new_i32();
79b0e534 6277 if (!u) {
9ee6e8bb 6278 switch (size) {
ad69471c
PB
6279 case 0:
6280 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6281 break;
6282 case 1:
6283 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6284 break;
6285 case 2:
6286 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6287 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6288 break;
9ee6e8bb
PB
6289 default: abort();
6290 }
6291 } else {
6292 switch (size) {
ad69471c
PB
6293 case 0:
6294 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6295 break;
6296 case 1:
6297 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6298 break;
6299 case 2:
6300 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6301 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6302 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6303 break;
9ee6e8bb
PB
6304 default: abort();
6305 }
6306 }
ad69471c
PB
6307 if (pass == 0) {
6308 tmp3 = tmp;
6309 } else {
6310 neon_store_reg(rd, 0, tmp3);
6311 neon_store_reg(rd, 1, tmp);
6312 }
9ee6e8bb
PB
6313 } else {
6314 /* Write back the result. */
ad69471c 6315 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6316 }
6317 }
6318 } else {
3e3326df
PM
6319 /* Two registers and a scalar. NB that for ops of this form
6320 * the ARM ARM labels bit 24 as Q, but it is in our variable
6321 * 'u', not 'q'.
6322 */
6323 if (size == 0) {
6324 return 1;
6325 }
9ee6e8bb 6326 switch (op) {
9ee6e8bb 6327 case 1: /* Float VMLA scalar */
9ee6e8bb 6328 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6329 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6330 if (size == 1) {
6331 return 1;
6332 }
6333 /* fall through */
6334 case 0: /* Integer VMLA scalar */
6335 case 4: /* Integer VMLS scalar */
6336 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6337 case 12: /* VQDMULH scalar */
6338 case 13: /* VQRDMULH scalar */
3e3326df
PM
6339 if (u && ((rd | rn) & 1)) {
6340 return 1;
6341 }
dd8fbd78
FN
6342 tmp = neon_get_scalar(size, rm);
6343 neon_store_scratch(0, tmp);
9ee6e8bb 6344 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6345 tmp = neon_load_scratch(0);
6346 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6347 if (op == 12) {
6348 if (size == 1) {
02da0b2d 6349 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6350 } else {
02da0b2d 6351 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6352 }
6353 } else if (op == 13) {
6354 if (size == 1) {
02da0b2d 6355 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6356 } else {
02da0b2d 6357 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6358 }
6359 } else if (op & 1) {
aa47cfdd
PM
6360 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6361 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6362 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6363 } else {
6364 switch (size) {
dd8fbd78
FN
6365 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6366 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6367 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6368 default: abort();
9ee6e8bb
PB
6369 }
6370 }
7d1b0095 6371 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6372 if (op < 8) {
6373 /* Accumulate. */
dd8fbd78 6374 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6375 switch (op) {
6376 case 0:
dd8fbd78 6377 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6378 break;
6379 case 1:
aa47cfdd
PM
6380 {
6381 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6382 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6383 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6384 break;
aa47cfdd 6385 }
9ee6e8bb 6386 case 4:
dd8fbd78 6387 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6388 break;
6389 case 5:
aa47cfdd
PM
6390 {
6391 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6392 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6393 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6394 break;
aa47cfdd 6395 }
9ee6e8bb
PB
6396 default:
6397 abort();
6398 }
7d1b0095 6399 tcg_temp_free_i32(tmp2);
9ee6e8bb 6400 }
dd8fbd78 6401 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6402 }
6403 break;
9ee6e8bb 6404 case 3: /* VQDMLAL scalar */
9ee6e8bb 6405 case 7: /* VQDMLSL scalar */
9ee6e8bb 6406 case 11: /* VQDMULL scalar */
3e3326df 6407 if (u == 1) {
ad69471c 6408 return 1;
3e3326df
PM
6409 }
6410 /* fall through */
6411 case 2: /* VMLAL sclar */
6412 case 6: /* VMLSL scalar */
6413 case 10: /* VMULL scalar */
6414 if (rd & 1) {
6415 return 1;
6416 }
dd8fbd78 6417 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6418 /* We need a copy of tmp2 because gen_neon_mull
6419 * deletes it during pass 0. */
7d1b0095 6420 tmp4 = tcg_temp_new_i32();
c6067f04 6421 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6422 tmp3 = neon_load_reg(rn, 1);
ad69471c 6423
9ee6e8bb 6424 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6425 if (pass == 0) {
6426 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6427 } else {
dd8fbd78 6428 tmp = tmp3;
c6067f04 6429 tmp2 = tmp4;
9ee6e8bb 6430 }
ad69471c 6431 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6432 if (op != 11) {
6433 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6434 }
9ee6e8bb 6435 switch (op) {
4dc064e6
PM
6436 case 6:
6437 gen_neon_negl(cpu_V0, size);
6438 /* Fall through */
6439 case 2:
ad69471c 6440 gen_neon_addl(size);
9ee6e8bb
PB
6441 break;
6442 case 3: case 7:
ad69471c 6443 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6444 if (op == 7) {
6445 gen_neon_negl(cpu_V0, size);
6446 }
ad69471c 6447 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6448 break;
6449 case 10:
6450 /* no-op */
6451 break;
6452 case 11:
ad69471c 6453 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6454 break;
6455 default:
6456 abort();
6457 }
ad69471c 6458 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6459 }
dd8fbd78 6460
dd8fbd78 6461
9ee6e8bb
PB
6462 break;
6463 default: /* 14 and 15 are RESERVED */
6464 return 1;
6465 }
6466 }
6467 } else { /* size == 3 */
6468 if (!u) {
6469 /* Extract. */
9ee6e8bb 6470 imm = (insn >> 8) & 0xf;
ad69471c
PB
6471
6472 if (imm > 7 && !q)
6473 return 1;
6474
52579ea1
PM
6475 if (q && ((rd | rn | rm) & 1)) {
6476 return 1;
6477 }
6478
ad69471c
PB
6479 if (imm == 0) {
6480 neon_load_reg64(cpu_V0, rn);
6481 if (q) {
6482 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6483 }
ad69471c
PB
6484 } else if (imm == 8) {
6485 neon_load_reg64(cpu_V0, rn + 1);
6486 if (q) {
6487 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6488 }
ad69471c 6489 } else if (q) {
a7812ae4 6490 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6491 if (imm < 8) {
6492 neon_load_reg64(cpu_V0, rn);
a7812ae4 6493 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6494 } else {
6495 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6496 neon_load_reg64(tmp64, rm);
ad69471c
PB
6497 }
6498 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6499 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6500 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6501 if (imm < 8) {
6502 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6503 } else {
ad69471c
PB
6504 neon_load_reg64(cpu_V1, rm + 1);
6505 imm -= 8;
9ee6e8bb 6506 }
ad69471c 6507 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6508 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6509 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6510 tcg_temp_free_i64(tmp64);
ad69471c 6511 } else {
a7812ae4 6512 /* BUGFIX */
ad69471c 6513 neon_load_reg64(cpu_V0, rn);
a7812ae4 6514 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6515 neon_load_reg64(cpu_V1, rm);
a7812ae4 6516 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6517 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6518 }
6519 neon_store_reg64(cpu_V0, rd);
6520 if (q) {
6521 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6522 }
6523 } else if ((insn & (1 << 11)) == 0) {
6524 /* Two register misc. */
6525 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6526 size = (insn >> 18) & 3;
600b828c
PM
6527 /* UNDEF for unknown op values and bad op-size combinations */
6528 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6529 return 1;
6530 }
fc2a9b37
PM
6531 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6532 q && ((rm | rd) & 1)) {
6533 return 1;
6534 }
9ee6e8bb 6535 switch (op) {
600b828c 6536 case NEON_2RM_VREV64:
9ee6e8bb 6537 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6538 tmp = neon_load_reg(rm, pass * 2);
6539 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6540 switch (size) {
dd8fbd78
FN
6541 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6542 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6543 case 2: /* no-op */ break;
6544 default: abort();
6545 }
dd8fbd78 6546 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6547 if (size == 2) {
dd8fbd78 6548 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6549 } else {
9ee6e8bb 6550 switch (size) {
dd8fbd78
FN
6551 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6552 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6553 default: abort();
6554 }
dd8fbd78 6555 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6556 }
6557 }
6558 break;
600b828c
PM
6559 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6560 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6561 for (pass = 0; pass < q + 1; pass++) {
6562 tmp = neon_load_reg(rm, pass * 2);
6563 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6564 tmp = neon_load_reg(rm, pass * 2 + 1);
6565 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6566 switch (size) {
6567 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6568 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6569 case 2: tcg_gen_add_i64(CPU_V001); break;
6570 default: abort();
6571 }
600b828c 6572 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6573 /* Accumulate. */
ad69471c
PB
6574 neon_load_reg64(cpu_V1, rd + pass);
6575 gen_neon_addl(size);
9ee6e8bb 6576 }
ad69471c 6577 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6578 }
6579 break;
600b828c 6580 case NEON_2RM_VTRN:
9ee6e8bb 6581 if (size == 2) {
a5a14945 6582 int n;
9ee6e8bb 6583 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6584 tmp = neon_load_reg(rm, n);
6585 tmp2 = neon_load_reg(rd, n + 1);
6586 neon_store_reg(rm, n, tmp2);
6587 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6588 }
6589 } else {
6590 goto elementwise;
6591 }
6592 break;
600b828c 6593 case NEON_2RM_VUZP:
02acedf9 6594 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6595 return 1;
9ee6e8bb
PB
6596 }
6597 break;
600b828c 6598 case NEON_2RM_VZIP:
d68a6f3a 6599 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6600 return 1;
9ee6e8bb
PB
6601 }
6602 break;
600b828c
PM
6603 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6604 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6605 if (rm & 1) {
6606 return 1;
6607 }
39d5492a 6608 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6609 for (pass = 0; pass < 2; pass++) {
ad69471c 6610 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6611 tmp = tcg_temp_new_i32();
600b828c
PM
6612 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6613 tmp, cpu_V0);
ad69471c
PB
6614 if (pass == 0) {
6615 tmp2 = tmp;
6616 } else {
6617 neon_store_reg(rd, 0, tmp2);
6618 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6619 }
9ee6e8bb
PB
6620 }
6621 break;
600b828c 6622 case NEON_2RM_VSHLL:
fc2a9b37 6623 if (q || (rd & 1)) {
9ee6e8bb 6624 return 1;
600b828c 6625 }
ad69471c
PB
6626 tmp = neon_load_reg(rm, 0);
6627 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6628 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6629 if (pass == 1)
6630 tmp = tmp2;
6631 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6632 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6633 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6634 }
6635 break;
600b828c 6636 case NEON_2RM_VCVT_F16_F32:
d614a513 6637 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6638 q || (rm & 1)) {
6639 return 1;
6640 }
7d1b0095
PM
6641 tmp = tcg_temp_new_i32();
6642 tmp2 = tcg_temp_new_i32();
60011498 6643 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6644 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6645 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6646 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6647 tcg_gen_shli_i32(tmp2, tmp2, 16);
6648 tcg_gen_or_i32(tmp2, tmp2, tmp);
6649 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6650 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6651 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6652 neon_store_reg(rd, 0, tmp2);
7d1b0095 6653 tmp2 = tcg_temp_new_i32();
2d981da7 6654 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6655 tcg_gen_shli_i32(tmp2, tmp2, 16);
6656 tcg_gen_or_i32(tmp2, tmp2, tmp);
6657 neon_store_reg(rd, 1, tmp2);
7d1b0095 6658 tcg_temp_free_i32(tmp);
60011498 6659 break;
600b828c 6660 case NEON_2RM_VCVT_F32_F16:
d614a513 6661 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6662 q || (rd & 1)) {
6663 return 1;
6664 }
7d1b0095 6665 tmp3 = tcg_temp_new_i32();
60011498
PB
6666 tmp = neon_load_reg(rm, 0);
6667 tmp2 = neon_load_reg(rm, 1);
6668 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6669 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6670 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6671 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6672 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6673 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6674 tcg_temp_free_i32(tmp);
60011498 6675 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6676 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6677 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6678 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6679 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6680 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6681 tcg_temp_free_i32(tmp2);
6682 tcg_temp_free_i32(tmp3);
60011498 6683 break;
9d935509 6684 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6685 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6686 || ((rm | rd) & 1)) {
6687 return 1;
6688 }
6689 tmp = tcg_const_i32(rd);
6690 tmp2 = tcg_const_i32(rm);
6691
6692 /* Bit 6 is the lowest opcode bit; it distinguishes between
6693 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6694 */
6695 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6696
6697 if (op == NEON_2RM_AESE) {
6698 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6699 } else {
6700 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6701 }
6702 tcg_temp_free_i32(tmp);
6703 tcg_temp_free_i32(tmp2);
6704 tcg_temp_free_i32(tmp3);
6705 break;
f1ecb913 6706 case NEON_2RM_SHA1H:
d614a513 6707 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6708 || ((rm | rd) & 1)) {
6709 return 1;
6710 }
6711 tmp = tcg_const_i32(rd);
6712 tmp2 = tcg_const_i32(rm);
6713
6714 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6715
6716 tcg_temp_free_i32(tmp);
6717 tcg_temp_free_i32(tmp2);
6718 break;
6719 case NEON_2RM_SHA1SU1:
6720 if ((rm | rd) & 1) {
6721 return 1;
6722 }
6723 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6724 if (q) {
d614a513 6725 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6726 return 1;
6727 }
d614a513 6728 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6729 return 1;
6730 }
6731 tmp = tcg_const_i32(rd);
6732 tmp2 = tcg_const_i32(rm);
6733 if (q) {
6734 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6735 } else {
6736 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6737 }
6738 tcg_temp_free_i32(tmp);
6739 tcg_temp_free_i32(tmp2);
6740 break;
9ee6e8bb
PB
6741 default:
6742 elementwise:
6743 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6744 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6745 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6746 neon_reg_offset(rm, pass));
39d5492a 6747 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6748 } else {
dd8fbd78 6749 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6750 }
6751 switch (op) {
600b828c 6752 case NEON_2RM_VREV32:
9ee6e8bb 6753 switch (size) {
dd8fbd78
FN
6754 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6755 case 1: gen_swap_half(tmp); break;
600b828c 6756 default: abort();
9ee6e8bb
PB
6757 }
6758 break;
600b828c 6759 case NEON_2RM_VREV16:
dd8fbd78 6760 gen_rev16(tmp);
9ee6e8bb 6761 break;
600b828c 6762 case NEON_2RM_VCLS:
9ee6e8bb 6763 switch (size) {
dd8fbd78
FN
6764 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6765 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6766 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6767 default: abort();
9ee6e8bb
PB
6768 }
6769 break;
600b828c 6770 case NEON_2RM_VCLZ:
9ee6e8bb 6771 switch (size) {
dd8fbd78
FN
6772 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6773 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6774 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6775 default: abort();
9ee6e8bb
PB
6776 }
6777 break;
600b828c 6778 case NEON_2RM_VCNT:
dd8fbd78 6779 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6780 break;
600b828c 6781 case NEON_2RM_VMVN:
dd8fbd78 6782 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6783 break;
600b828c 6784 case NEON_2RM_VQABS:
9ee6e8bb 6785 switch (size) {
02da0b2d
PM
6786 case 0:
6787 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6788 break;
6789 case 1:
6790 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6791 break;
6792 case 2:
6793 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6794 break;
600b828c 6795 default: abort();
9ee6e8bb
PB
6796 }
6797 break;
600b828c 6798 case NEON_2RM_VQNEG:
9ee6e8bb 6799 switch (size) {
02da0b2d
PM
6800 case 0:
6801 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6802 break;
6803 case 1:
6804 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6805 break;
6806 case 2:
6807 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6808 break;
600b828c 6809 default: abort();
9ee6e8bb
PB
6810 }
6811 break;
600b828c 6812 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6813 tmp2 = tcg_const_i32(0);
9ee6e8bb 6814 switch(size) {
dd8fbd78
FN
6815 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6816 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6817 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6818 default: abort();
9ee6e8bb 6819 }
39d5492a 6820 tcg_temp_free_i32(tmp2);
600b828c 6821 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6822 tcg_gen_not_i32(tmp, tmp);
600b828c 6823 }
9ee6e8bb 6824 break;
600b828c 6825 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6826 tmp2 = tcg_const_i32(0);
9ee6e8bb 6827 switch(size) {
dd8fbd78
FN
6828 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6829 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6830 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6831 default: abort();
9ee6e8bb 6832 }
39d5492a 6833 tcg_temp_free_i32(tmp2);
600b828c 6834 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6835 tcg_gen_not_i32(tmp, tmp);
600b828c 6836 }
9ee6e8bb 6837 break;
600b828c 6838 case NEON_2RM_VCEQ0:
dd8fbd78 6839 tmp2 = tcg_const_i32(0);
9ee6e8bb 6840 switch(size) {
dd8fbd78
FN
6841 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6842 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6843 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6844 default: abort();
9ee6e8bb 6845 }
39d5492a 6846 tcg_temp_free_i32(tmp2);
9ee6e8bb 6847 break;
600b828c 6848 case NEON_2RM_VABS:
9ee6e8bb 6849 switch(size) {
dd8fbd78
FN
6850 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6851 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6852 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6853 default: abort();
9ee6e8bb
PB
6854 }
6855 break;
600b828c 6856 case NEON_2RM_VNEG:
dd8fbd78
FN
6857 tmp2 = tcg_const_i32(0);
6858 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6859 tcg_temp_free_i32(tmp2);
9ee6e8bb 6860 break;
600b828c 6861 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6862 {
6863 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6864 tmp2 = tcg_const_i32(0);
aa47cfdd 6865 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6866 tcg_temp_free_i32(tmp2);
aa47cfdd 6867 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6868 break;
aa47cfdd 6869 }
600b828c 6870 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6871 {
6872 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6873 tmp2 = tcg_const_i32(0);
aa47cfdd 6874 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6875 tcg_temp_free_i32(tmp2);
aa47cfdd 6876 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6877 break;
aa47cfdd 6878 }
600b828c 6879 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6880 {
6881 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6882 tmp2 = tcg_const_i32(0);
aa47cfdd 6883 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6884 tcg_temp_free_i32(tmp2);
aa47cfdd 6885 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6886 break;
aa47cfdd 6887 }
600b828c 6888 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6889 {
6890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6891 tmp2 = tcg_const_i32(0);
aa47cfdd 6892 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6893 tcg_temp_free_i32(tmp2);
aa47cfdd 6894 tcg_temp_free_ptr(fpstatus);
0e326109 6895 break;
aa47cfdd 6896 }
600b828c 6897 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6898 {
6899 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6900 tmp2 = tcg_const_i32(0);
aa47cfdd 6901 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6902 tcg_temp_free_i32(tmp2);
aa47cfdd 6903 tcg_temp_free_ptr(fpstatus);
0e326109 6904 break;
aa47cfdd 6905 }
600b828c 6906 case NEON_2RM_VABS_F:
4373f3ce 6907 gen_vfp_abs(0);
9ee6e8bb 6908 break;
600b828c 6909 case NEON_2RM_VNEG_F:
4373f3ce 6910 gen_vfp_neg(0);
9ee6e8bb 6911 break;
600b828c 6912 case NEON_2RM_VSWP:
dd8fbd78
FN
6913 tmp2 = neon_load_reg(rd, pass);
6914 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6915 break;
600b828c 6916 case NEON_2RM_VTRN:
dd8fbd78 6917 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6918 switch (size) {
dd8fbd78
FN
6919 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6920 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6921 default: abort();
9ee6e8bb 6922 }
dd8fbd78 6923 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6924 break;
34f7b0a2
WN
6925 case NEON_2RM_VRINTN:
6926 case NEON_2RM_VRINTA:
6927 case NEON_2RM_VRINTM:
6928 case NEON_2RM_VRINTP:
6929 case NEON_2RM_VRINTZ:
6930 {
6931 TCGv_i32 tcg_rmode;
6932 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6933 int rmode;
6934
6935 if (op == NEON_2RM_VRINTZ) {
6936 rmode = FPROUNDING_ZERO;
6937 } else {
6938 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6939 }
6940
6941 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6942 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6943 cpu_env);
6944 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6945 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6946 cpu_env);
6947 tcg_temp_free_ptr(fpstatus);
6948 tcg_temp_free_i32(tcg_rmode);
6949 break;
6950 }
2ce70625
WN
6951 case NEON_2RM_VRINTX:
6952 {
6953 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6954 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6955 tcg_temp_free_ptr(fpstatus);
6956 break;
6957 }
901ad525
WN
6958 case NEON_2RM_VCVTAU:
6959 case NEON_2RM_VCVTAS:
6960 case NEON_2RM_VCVTNU:
6961 case NEON_2RM_VCVTNS:
6962 case NEON_2RM_VCVTPU:
6963 case NEON_2RM_VCVTPS:
6964 case NEON_2RM_VCVTMU:
6965 case NEON_2RM_VCVTMS:
6966 {
6967 bool is_signed = !extract32(insn, 7, 1);
6968 TCGv_ptr fpst = get_fpstatus_ptr(1);
6969 TCGv_i32 tcg_rmode, tcg_shift;
6970 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6971
6972 tcg_shift = tcg_const_i32(0);
6973 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6974 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6975 cpu_env);
6976
6977 if (is_signed) {
6978 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6979 tcg_shift, fpst);
6980 } else {
6981 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6982 tcg_shift, fpst);
6983 }
6984
6985 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6986 cpu_env);
6987 tcg_temp_free_i32(tcg_rmode);
6988 tcg_temp_free_i32(tcg_shift);
6989 tcg_temp_free_ptr(fpst);
6990 break;
6991 }
600b828c 6992 case NEON_2RM_VRECPE:
b6d4443a
AB
6993 {
6994 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6995 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6996 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6997 break;
b6d4443a 6998 }
600b828c 6999 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7000 {
7001 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7002 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7003 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7004 break;
c2fb418e 7005 }
600b828c 7006 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7007 {
7008 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7009 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7010 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7011 break;
b6d4443a 7012 }
600b828c 7013 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7014 {
7015 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7016 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7017 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7018 break;
c2fb418e 7019 }
600b828c 7020 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7021 gen_vfp_sito(0, 1);
9ee6e8bb 7022 break;
600b828c 7023 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7024 gen_vfp_uito(0, 1);
9ee6e8bb 7025 break;
600b828c 7026 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7027 gen_vfp_tosiz(0, 1);
9ee6e8bb 7028 break;
600b828c 7029 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7030 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7031 break;
7032 default:
600b828c
PM
7033 /* Reserved op values were caught by the
7034 * neon_2rm_sizes[] check earlier.
7035 */
7036 abort();
9ee6e8bb 7037 }
600b828c 7038 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7039 tcg_gen_st_f32(cpu_F0s, cpu_env,
7040 neon_reg_offset(rd, pass));
9ee6e8bb 7041 } else {
dd8fbd78 7042 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7043 }
7044 }
7045 break;
7046 }
7047 } else if ((insn & (1 << 10)) == 0) {
7048 /* VTBL, VTBX. */
56907d77
PM
7049 int n = ((insn >> 8) & 3) + 1;
7050 if ((rn + n) > 32) {
7051 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7052 * helper function running off the end of the register file.
7053 */
7054 return 1;
7055 }
7056 n <<= 3;
9ee6e8bb 7057 if (insn & (1 << 6)) {
8f8e3aa4 7058 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7059 } else {
7d1b0095 7060 tmp = tcg_temp_new_i32();
8f8e3aa4 7061 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7062 }
8f8e3aa4 7063 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7064 tmp4 = tcg_const_i32(rn);
7065 tmp5 = tcg_const_i32(n);
9ef39277 7066 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7067 tcg_temp_free_i32(tmp);
9ee6e8bb 7068 if (insn & (1 << 6)) {
8f8e3aa4 7069 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7070 } else {
7d1b0095 7071 tmp = tcg_temp_new_i32();
8f8e3aa4 7072 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7073 }
8f8e3aa4 7074 tmp3 = neon_load_reg(rm, 1);
9ef39277 7075 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7076 tcg_temp_free_i32(tmp5);
7077 tcg_temp_free_i32(tmp4);
8f8e3aa4 7078 neon_store_reg(rd, 0, tmp2);
3018f259 7079 neon_store_reg(rd, 1, tmp3);
7d1b0095 7080 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7081 } else if ((insn & 0x380) == 0) {
7082 /* VDUP */
133da6aa
JR
7083 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7084 return 1;
7085 }
9ee6e8bb 7086 if (insn & (1 << 19)) {
dd8fbd78 7087 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7088 } else {
dd8fbd78 7089 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7090 }
7091 if (insn & (1 << 16)) {
dd8fbd78 7092 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7093 } else if (insn & (1 << 17)) {
7094 if ((insn >> 18) & 1)
dd8fbd78 7095 gen_neon_dup_high16(tmp);
9ee6e8bb 7096 else
dd8fbd78 7097 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7098 }
7099 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7100 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7101 tcg_gen_mov_i32(tmp2, tmp);
7102 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7103 }
7d1b0095 7104 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7105 } else {
7106 return 1;
7107 }
7108 }
7109 }
7110 return 0;
7111}
7112
/* Decode and translate a generic coprocessor instruction
 * (CDP, MCR/MRC, MCRR/MRRC and their cp14/cp15 system-register forms).
 *
 * @s: disassembly context for the instruction being translated
 * @insn: the 32-bit instruction word
 *
 * Returns 0 if the instruction was translated, nonzero if it should
 * UNDEF (the caller raises the undefined-instruction exception).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* cp0/cp1 access is gated per-coprocessor by the CPAR register. */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp: no register transfer; we don't implement any, so UNDEF */
        return 1;
    }

    /* Extract the register-access encoding fields.  The 64-bit (MCRR/MRRC)
     * form has no crn/opc2 and carries the second transfer register in
     * bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look the register up in the hashtable of everything this CPU defines
     * (keyed on the full encoding including the current security state).
     */
    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions (static, translation-time check) */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec state and PC so the helper can raise a
             * precise exception pointing at this instruction.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* Advance PC past the insn, then halt until an interrupt. */
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O-marked registers must be bracketed by io_start/io_end when
         * icount is in use so the access is properly accounted.
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low word to rt, high to rt2. */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Assemble the 64-bit value rt2:rt to be written. */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7361
5e3f878a
PB
7362
7363/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7364static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7365{
39d5492a 7366 TCGv_i32 tmp;
7d1b0095 7367 tmp = tcg_temp_new_i32();
ecc7b3aa 7368 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7369 store_reg(s, rlow, tmp);
7d1b0095 7370 tmp = tcg_temp_new_i32();
5e3f878a 7371 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7372 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7373 store_reg(s, rhigh, tmp);
7374}
7375
7376/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7377static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7378{
a7812ae4 7379 TCGv_i64 tmp;
39d5492a 7380 TCGv_i32 tmp2;
5e3f878a 7381
36aa55dc 7382 /* Load value and extend to 64 bits. */
a7812ae4 7383 tmp = tcg_temp_new_i64();
5e3f878a
PB
7384 tmp2 = load_reg(s, rlow);
7385 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7386 tcg_temp_free_i32(tmp2);
5e3f878a 7387 tcg_gen_add_i64(val, val, tmp);
b75263d6 7388 tcg_temp_free_i64(tmp);
5e3f878a
PB
7389}
7390
7391/* load and add a 64-bit value from a register pair. */
a7812ae4 7392static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7393{
a7812ae4 7394 TCGv_i64 tmp;
39d5492a
PM
7395 TCGv_i32 tmpl;
7396 TCGv_i32 tmph;
5e3f878a
PB
7397
7398 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7399 tmpl = load_reg(s, rlow);
7400 tmph = load_reg(s, rhigh);
a7812ae4 7401 tmp = tcg_temp_new_i64();
36aa55dc 7402 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7403 tcg_temp_free_i32(tmpl);
7404 tcg_temp_free_i32(tmph);
5e3f878a 7405 tcg_gen_add_i64(val, val, tmp);
b75263d6 7406 tcg_temp_free_i64(tmp);
5e3f878a
PB
7407}
7408
/* Set N and Z flags from hi|lo.
 *
 * N is taken from bit 31 of @hi (the sign of the 64-bit result); Z is
 * represented by cpu_ZF holding lo|hi, which is zero exactly when the
 * whole 64-bit result is zero.  cpu_NF is written before cpu_ZF is
 * computed; keep this order in case a caller ever passes cpu_ZF itself
 * as an operand.
 */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7415
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */

/* Generate code for a load-exclusive (LDREXB/LDREXH/LDREX/LDREXD).
 *
 * @rt: destination register; @rt2: second destination (size == 3 only)
 * @addr: TCG value holding the guest address
 * @size: log2 of access size (0 = byte, 1 = halfword, 2 = word,
 *        3 = doubleword pair)
 *
 * Loads the value(s) into rt (and rt2), and records the address and
 * loaded value in cpu_exclusive_addr/cpu_exclusive_val for the matching
 * store-exclusive to compare against.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Record that this TB contains a load-exclusive (consumed elsewhere
     * in the translator -- see users of is_ldex).
     */
    s->is_ldex = true;

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        /* The "ua" loads perform the architectural alignment check. */
        gen_aa32_ld16ua(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32ua(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        /* Doubleword: load the second word from addr + 4 and record the
         * full 64-bit value in the exclusive monitor.
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the monitor last, once the load(s) have succeeded. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7463
7464static void gen_clrex(DisasContext *s)
7465{
03d05e2d 7466 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7467}
7468
7469#ifdef CONFIG_USER_ONLY
7470static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7471 TCGv_i32 addr, int size)
426f5abc 7472{
03d05e2d 7473 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
7474 tcg_gen_movi_i32(cpu_exclusive_info,
7475 size | (rd << 4) | (rt << 8) | (rt2 << 12));
d4a2dc67 7476 gen_exception_internal_insn(s, 4, EXCP_STREX);
426f5abc
PB
7477}
#else
/* Store-exclusive for system emulation (STREXB/STREXH/STREX/STREXD).
 *
 * @rd: status register (0 on success, 1 on failure)
 * @rt: source register; @rt2: second source (size == 3 only)
 * @addr: TCG value holding the guest address
 * @size: log2 of access size (0 = byte, 1 = halfword, 2 = word,
 *        3 = doubleword pair)
 *
 * Succeeds only if the monitor address still matches AND memory still
 * holds the value recorded by the paired load-exclusive; the monitor is
 * cleared on either path.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check: compare against the (64-bit) monitor address. */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Re-load the current memory contents for the value check. */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    /* Fail if memory no longer holds the value the load-exclusive saw. */
    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the actual store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the monitor is always disarmed. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7563
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which mode's banked r13/stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: stores LR and
 * SPSR of the current mode to the stack of @mode, optionally writing
 * the adjusted stack pointer back to @mode's banked r13.
 *
 * NOTE(review): the ARM ARM lists UNPREDICTABLE cases for SRS (e.g.
 * certain @mode encodings); none are checked here -- presumably the
 * caller only filters the IS_USER() case.  Confirm against the callers.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* addr = banked r13 (SP) of the target mode */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Pre-adjust so addr points at the lower of the two words stored
     * (LR), according to the addressing mode.
     */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR at addr, then SPSR at addr + 4. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* addr currently holds original SP + first-offset + 4; these
         * deltas bring it to the architectural writeback value
         * (SP - 8 for DA/DB, SP + 8 for IA/IB).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
7629
f4df2210 7630static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7631{
f4df2210 7632 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7633 TCGv_i32 tmp;
7634 TCGv_i32 tmp2;
7635 TCGv_i32 tmp3;
7636 TCGv_i32 addr;
a7812ae4 7637 TCGv_i64 tmp64;
9ee6e8bb 7638
9ee6e8bb 7639 /* M variants do not implement ARM mode. */
b53d8923 7640 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7641 goto illegal_op;
b53d8923 7642 }
9ee6e8bb
PB
7643 cond = insn >> 28;
7644 if (cond == 0xf){
be5e7a76
DES
7645 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7646 * choose to UNDEF. In ARMv5 and above the space is used
7647 * for miscellaneous unconditional instructions.
7648 */
7649 ARCH(5);
7650
9ee6e8bb
PB
7651 /* Unconditional instructions. */
7652 if (((insn >> 25) & 7) == 1) {
7653 /* NEON Data processing. */
d614a513 7654 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7655 goto illegal_op;
d614a513 7656 }
9ee6e8bb 7657
7dcc1f89 7658 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7659 goto illegal_op;
7dcc1f89 7660 }
9ee6e8bb
PB
7661 return;
7662 }
7663 if ((insn & 0x0f100000) == 0x04000000) {
7664 /* NEON load/store. */
d614a513 7665 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7666 goto illegal_op;
d614a513 7667 }
9ee6e8bb 7668
7dcc1f89 7669 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7670 goto illegal_op;
7dcc1f89 7671 }
9ee6e8bb
PB
7672 return;
7673 }
6a57f3eb
WN
7674 if ((insn & 0x0f000e10) == 0x0e000a00) {
7675 /* VFP. */
7dcc1f89 7676 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
7677 goto illegal_op;
7678 }
7679 return;
7680 }
3d185e5d
PM
7681 if (((insn & 0x0f30f000) == 0x0510f000) ||
7682 ((insn & 0x0f30f010) == 0x0710f000)) {
7683 if ((insn & (1 << 22)) == 0) {
7684 /* PLDW; v7MP */
d614a513 7685 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7686 goto illegal_op;
7687 }
7688 }
7689 /* Otherwise PLD; v5TE+ */
be5e7a76 7690 ARCH(5TE);
3d185e5d
PM
7691 return;
7692 }
7693 if (((insn & 0x0f70f000) == 0x0450f000) ||
7694 ((insn & 0x0f70f010) == 0x0650f000)) {
7695 ARCH(7);
7696 return; /* PLI; V7 */
7697 }
7698 if (((insn & 0x0f700000) == 0x04100000) ||
7699 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7700 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7701 goto illegal_op;
7702 }
7703 return; /* v7MP: Unallocated memory hint: must NOP */
7704 }
7705
7706 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7707 ARCH(6);
7708 /* setend */
10962fd5
PM
7709 if (((insn >> 9) & 1) != s->bswap_code) {
7710 /* Dynamic endianness switching not implemented. */
e0c270d9 7711 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7712 goto illegal_op;
7713 }
7714 return;
7715 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7716 switch ((insn >> 4) & 0xf) {
7717 case 1: /* clrex */
7718 ARCH(6K);
426f5abc 7719 gen_clrex(s);
9ee6e8bb
PB
7720 return;
7721 case 4: /* dsb */
7722 case 5: /* dmb */
9ee6e8bb
PB
7723 ARCH(7);
7724 /* We don't emulate caches so these are a no-op. */
7725 return;
6df99dec
SS
7726 case 6: /* isb */
7727 /* We need to break the TB after this insn to execute
7728 * self-modifying code correctly and also to take
7729 * any pending interrupts immediately.
7730 */
7731 gen_lookup_tb(s);
7732 return;
9ee6e8bb
PB
7733 default:
7734 goto illegal_op;
7735 }
7736 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7737 /* srs */
81465888 7738 if (IS_USER(s)) {
9ee6e8bb 7739 goto illegal_op;
9ee6e8bb 7740 }
81465888
PM
7741 ARCH(6);
7742 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7743 return;
ea825eee 7744 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7745 /* rfe */
c67b6b71 7746 int32_t offset;
9ee6e8bb
PB
7747 if (IS_USER(s))
7748 goto illegal_op;
7749 ARCH(6);
7750 rn = (insn >> 16) & 0xf;
b0109805 7751 addr = load_reg(s, rn);
9ee6e8bb
PB
7752 i = (insn >> 23) & 3;
7753 switch (i) {
b0109805 7754 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7755 case 1: offset = 0; break; /* IA */
7756 case 2: offset = -8; break; /* DB */
b0109805 7757 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7758 default: abort();
7759 }
7760 if (offset)
b0109805
PB
7761 tcg_gen_addi_i32(addr, addr, offset);
7762 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7763 tmp = tcg_temp_new_i32();
6ce2faf4 7764 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7765 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7766 tmp2 = tcg_temp_new_i32();
6ce2faf4 7767 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7768 if (insn & (1 << 21)) {
7769 /* Base writeback. */
7770 switch (i) {
b0109805 7771 case 0: offset = -8; break;
c67b6b71
FN
7772 case 1: offset = 4; break;
7773 case 2: offset = -4; break;
b0109805 7774 case 3: offset = 0; break;
9ee6e8bb
PB
7775 default: abort();
7776 }
7777 if (offset)
b0109805
PB
7778 tcg_gen_addi_i32(addr, addr, offset);
7779 store_reg(s, rn, addr);
7780 } else {
7d1b0095 7781 tcg_temp_free_i32(addr);
9ee6e8bb 7782 }
b0109805 7783 gen_rfe(s, tmp, tmp2);
c67b6b71 7784 return;
9ee6e8bb
PB
7785 } else if ((insn & 0x0e000000) == 0x0a000000) {
7786 /* branch link and change to thumb (blx <offset>) */
7787 int32_t offset;
7788
7789 val = (uint32_t)s->pc;
7d1b0095 7790 tmp = tcg_temp_new_i32();
d9ba4830
PB
7791 tcg_gen_movi_i32(tmp, val);
7792 store_reg(s, 14, tmp);
9ee6e8bb
PB
7793 /* Sign-extend the 24-bit offset */
7794 offset = (((int32_t)insn) << 8) >> 8;
7795 /* offset * 4 + bit24 * 2 + (thumb bit) */
7796 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7797 /* pipeline offset */
7798 val += 4;
be5e7a76 7799 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7800 gen_bx_im(s, val);
9ee6e8bb
PB
7801 return;
7802 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7803 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7804 /* iWMMXt register transfer. */
c0f4af17 7805 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7806 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7807 return;
c0f4af17
PM
7808 }
7809 }
9ee6e8bb
PB
7810 }
7811 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7812 /* Coprocessor double register transfer. */
be5e7a76 7813 ARCH(5TE);
9ee6e8bb
PB
7814 } else if ((insn & 0x0f000010) == 0x0e000010) {
7815 /* Additional coprocessor register transfer. */
7997d92f 7816 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7817 uint32_t mask;
7818 uint32_t val;
7819 /* cps (privileged) */
7820 if (IS_USER(s))
7821 return;
7822 mask = val = 0;
7823 if (insn & (1 << 19)) {
7824 if (insn & (1 << 8))
7825 mask |= CPSR_A;
7826 if (insn & (1 << 7))
7827 mask |= CPSR_I;
7828 if (insn & (1 << 6))
7829 mask |= CPSR_F;
7830 if (insn & (1 << 18))
7831 val |= mask;
7832 }
7997d92f 7833 if (insn & (1 << 17)) {
9ee6e8bb
PB
7834 mask |= CPSR_M;
7835 val |= (insn & 0x1f);
7836 }
7837 if (mask) {
2fbac54b 7838 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7839 }
7840 return;
7841 }
7842 goto illegal_op;
7843 }
7844 if (cond != 0xe) {
7845 /* if not always execute, we generate a conditional jump to
7846 next instruction */
7847 s->condlabel = gen_new_label();
39fb730a 7848 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7849 s->condjmp = 1;
7850 }
7851 if ((insn & 0x0f900000) == 0x03000000) {
7852 if ((insn & (1 << 21)) == 0) {
7853 ARCH(6T2);
7854 rd = (insn >> 12) & 0xf;
7855 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7856 if ((insn & (1 << 22)) == 0) {
7857 /* MOVW */
7d1b0095 7858 tmp = tcg_temp_new_i32();
5e3f878a 7859 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7860 } else {
7861 /* MOVT */
5e3f878a 7862 tmp = load_reg(s, rd);
86831435 7863 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7864 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7865 }
5e3f878a 7866 store_reg(s, rd, tmp);
9ee6e8bb
PB
7867 } else {
7868 if (((insn >> 12) & 0xf) != 0xf)
7869 goto illegal_op;
7870 if (((insn >> 16) & 0xf) == 0) {
7871 gen_nop_hint(s, insn & 0xff);
7872 } else {
7873 /* CPSR = immediate */
7874 val = insn & 0xff;
7875 shift = ((insn >> 8) & 0xf) * 2;
7876 if (shift)
7877 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7878 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
7879 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7880 i, val)) {
9ee6e8bb 7881 goto illegal_op;
7dcc1f89 7882 }
9ee6e8bb
PB
7883 }
7884 }
7885 } else if ((insn & 0x0f900000) == 0x01000000
7886 && (insn & 0x00000090) != 0x00000090) {
7887 /* miscellaneous instructions */
7888 op1 = (insn >> 21) & 3;
7889 sh = (insn >> 4) & 0xf;
7890 rm = insn & 0xf;
7891 switch (sh) {
7892 case 0x0: /* move program status register */
7893 if (op1 & 1) {
7894 /* PSR = reg */
2fbac54b 7895 tmp = load_reg(s, rm);
9ee6e8bb 7896 i = ((op1 & 2) != 0);
7dcc1f89 7897 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7898 goto illegal_op;
7899 } else {
7900 /* reg = PSR */
7901 rd = (insn >> 12) & 0xf;
7902 if (op1 & 2) {
7903 if (IS_USER(s))
7904 goto illegal_op;
d9ba4830 7905 tmp = load_cpu_field(spsr);
9ee6e8bb 7906 } else {
7d1b0095 7907 tmp = tcg_temp_new_i32();
9ef39277 7908 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7909 }
d9ba4830 7910 store_reg(s, rd, tmp);
9ee6e8bb
PB
7911 }
7912 break;
7913 case 0x1:
7914 if (op1 == 1) {
7915 /* branch/exchange thumb (bx). */
be5e7a76 7916 ARCH(4T);
d9ba4830
PB
7917 tmp = load_reg(s, rm);
7918 gen_bx(s, tmp);
9ee6e8bb
PB
7919 } else if (op1 == 3) {
7920 /* clz */
be5e7a76 7921 ARCH(5);
9ee6e8bb 7922 rd = (insn >> 12) & 0xf;
1497c961
PB
7923 tmp = load_reg(s, rm);
7924 gen_helper_clz(tmp, tmp);
7925 store_reg(s, rd, tmp);
9ee6e8bb
PB
7926 } else {
7927 goto illegal_op;
7928 }
7929 break;
7930 case 0x2:
7931 if (op1 == 1) {
7932 ARCH(5J); /* bxj */
7933 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7934 tmp = load_reg(s, rm);
7935 gen_bx(s, tmp);
9ee6e8bb
PB
7936 } else {
7937 goto illegal_op;
7938 }
7939 break;
7940 case 0x3:
7941 if (op1 != 1)
7942 goto illegal_op;
7943
be5e7a76 7944 ARCH(5);
9ee6e8bb 7945 /* branch link/exchange thumb (blx) */
d9ba4830 7946 tmp = load_reg(s, rm);
7d1b0095 7947 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7948 tcg_gen_movi_i32(tmp2, s->pc);
7949 store_reg(s, 14, tmp2);
7950 gen_bx(s, tmp);
9ee6e8bb 7951 break;
eb0ecd5a
WN
7952 case 0x4:
7953 {
7954 /* crc32/crc32c */
7955 uint32_t c = extract32(insn, 8, 4);
7956
7957 /* Check this CPU supports ARMv8 CRC instructions.
7958 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7959 * Bits 8, 10 and 11 should be zero.
7960 */
d614a513 7961 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
7962 (c & 0xd) != 0) {
7963 goto illegal_op;
7964 }
7965
7966 rn = extract32(insn, 16, 4);
7967 rd = extract32(insn, 12, 4);
7968
7969 tmp = load_reg(s, rn);
7970 tmp2 = load_reg(s, rm);
aa633469
PM
7971 if (op1 == 0) {
7972 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7973 } else if (op1 == 1) {
7974 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7975 }
eb0ecd5a
WN
7976 tmp3 = tcg_const_i32(1 << op1);
7977 if (c & 0x2) {
7978 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7979 } else {
7980 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7981 }
7982 tcg_temp_free_i32(tmp2);
7983 tcg_temp_free_i32(tmp3);
7984 store_reg(s, rd, tmp);
7985 break;
7986 }
9ee6e8bb 7987 case 0x5: /* saturating add/subtract */
be5e7a76 7988 ARCH(5TE);
9ee6e8bb
PB
7989 rd = (insn >> 12) & 0xf;
7990 rn = (insn >> 16) & 0xf;
b40d0353 7991 tmp = load_reg(s, rm);
5e3f878a 7992 tmp2 = load_reg(s, rn);
9ee6e8bb 7993 if (op1 & 2)
9ef39277 7994 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7995 if (op1 & 1)
9ef39277 7996 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7997 else
9ef39277 7998 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7999 tcg_temp_free_i32(tmp2);
5e3f878a 8000 store_reg(s, rd, tmp);
9ee6e8bb 8001 break;
49e14940 8002 case 7:
d4a2dc67
PM
8003 {
8004 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8005 switch (op1) {
8006 case 1:
8007 /* bkpt */
8008 ARCH(5);
8009 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8010 syn_aa32_bkpt(imm16, false),
8011 default_exception_el(s));
37e6456e
PM
8012 break;
8013 case 2:
8014 /* Hypervisor call (v7) */
8015 ARCH(7);
8016 if (IS_USER(s)) {
8017 goto illegal_op;
8018 }
8019 gen_hvc(s, imm16);
8020 break;
8021 case 3:
8022 /* Secure monitor call (v6+) */
8023 ARCH(6K);
8024 if (IS_USER(s)) {
8025 goto illegal_op;
8026 }
8027 gen_smc(s);
8028 break;
8029 default:
49e14940
AL
8030 goto illegal_op;
8031 }
9ee6e8bb 8032 break;
d4a2dc67 8033 }
9ee6e8bb
PB
8034 case 0x8: /* signed multiply */
8035 case 0xa:
8036 case 0xc:
8037 case 0xe:
be5e7a76 8038 ARCH(5TE);
9ee6e8bb
PB
8039 rs = (insn >> 8) & 0xf;
8040 rn = (insn >> 12) & 0xf;
8041 rd = (insn >> 16) & 0xf;
8042 if (op1 == 1) {
8043 /* (32 * 16) >> 16 */
5e3f878a
PB
8044 tmp = load_reg(s, rm);
8045 tmp2 = load_reg(s, rs);
9ee6e8bb 8046 if (sh & 4)
5e3f878a 8047 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8048 else
5e3f878a 8049 gen_sxth(tmp2);
a7812ae4
PB
8050 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8051 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8052 tmp = tcg_temp_new_i32();
ecc7b3aa 8053 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8054 tcg_temp_free_i64(tmp64);
9ee6e8bb 8055 if ((sh & 2) == 0) {
5e3f878a 8056 tmp2 = load_reg(s, rn);
9ef39277 8057 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8058 tcg_temp_free_i32(tmp2);
9ee6e8bb 8059 }
5e3f878a 8060 store_reg(s, rd, tmp);
9ee6e8bb
PB
8061 } else {
8062 /* 16 * 16 */
5e3f878a
PB
8063 tmp = load_reg(s, rm);
8064 tmp2 = load_reg(s, rs);
8065 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8066 tcg_temp_free_i32(tmp2);
9ee6e8bb 8067 if (op1 == 2) {
a7812ae4
PB
8068 tmp64 = tcg_temp_new_i64();
8069 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8070 tcg_temp_free_i32(tmp);
a7812ae4
PB
8071 gen_addq(s, tmp64, rn, rd);
8072 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8073 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8074 } else {
8075 if (op1 == 0) {
5e3f878a 8076 tmp2 = load_reg(s, rn);
9ef39277 8077 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8078 tcg_temp_free_i32(tmp2);
9ee6e8bb 8079 }
5e3f878a 8080 store_reg(s, rd, tmp);
9ee6e8bb
PB
8081 }
8082 }
8083 break;
8084 default:
8085 goto illegal_op;
8086 }
8087 } else if (((insn & 0x0e000000) == 0 &&
8088 (insn & 0x00000090) != 0x90) ||
8089 ((insn & 0x0e000000) == (1 << 25))) {
8090 int set_cc, logic_cc, shiftop;
8091
8092 op1 = (insn >> 21) & 0xf;
8093 set_cc = (insn >> 20) & 1;
8094 logic_cc = table_logic_cc[op1] & set_cc;
8095
8096 /* data processing instruction */
8097 if (insn & (1 << 25)) {
8098 /* immediate operand */
8099 val = insn & 0xff;
8100 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8101 if (shift) {
9ee6e8bb 8102 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8103 }
7d1b0095 8104 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8105 tcg_gen_movi_i32(tmp2, val);
8106 if (logic_cc && shift) {
8107 gen_set_CF_bit31(tmp2);
8108 }
9ee6e8bb
PB
8109 } else {
8110 /* register */
8111 rm = (insn) & 0xf;
e9bb4aa9 8112 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8113 shiftop = (insn >> 5) & 3;
8114 if (!(insn & (1 << 4))) {
8115 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8116 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8117 } else {
8118 rs = (insn >> 8) & 0xf;
8984bd2e 8119 tmp = load_reg(s, rs);
e9bb4aa9 8120 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8121 }
8122 }
8123 if (op1 != 0x0f && op1 != 0x0d) {
8124 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8125 tmp = load_reg(s, rn);
8126 } else {
39d5492a 8127 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8128 }
8129 rd = (insn >> 12) & 0xf;
8130 switch(op1) {
8131 case 0x00:
e9bb4aa9
JR
8132 tcg_gen_and_i32(tmp, tmp, tmp2);
8133 if (logic_cc) {
8134 gen_logic_CC(tmp);
8135 }
7dcc1f89 8136 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8137 break;
8138 case 0x01:
e9bb4aa9
JR
8139 tcg_gen_xor_i32(tmp, tmp, tmp2);
8140 if (logic_cc) {
8141 gen_logic_CC(tmp);
8142 }
7dcc1f89 8143 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8144 break;
8145 case 0x02:
8146 if (set_cc && rd == 15) {
8147 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8148 if (IS_USER(s)) {
9ee6e8bb 8149 goto illegal_op;
e9bb4aa9 8150 }
72485ec4 8151 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8152 gen_exception_return(s, tmp);
9ee6e8bb 8153 } else {
e9bb4aa9 8154 if (set_cc) {
72485ec4 8155 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8156 } else {
8157 tcg_gen_sub_i32(tmp, tmp, tmp2);
8158 }
7dcc1f89 8159 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8160 }
8161 break;
8162 case 0x03:
e9bb4aa9 8163 if (set_cc) {
72485ec4 8164 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8165 } else {
8166 tcg_gen_sub_i32(tmp, tmp2, tmp);
8167 }
7dcc1f89 8168 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8169 break;
8170 case 0x04:
e9bb4aa9 8171 if (set_cc) {
72485ec4 8172 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8173 } else {
8174 tcg_gen_add_i32(tmp, tmp, tmp2);
8175 }
7dcc1f89 8176 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8177 break;
8178 case 0x05:
e9bb4aa9 8179 if (set_cc) {
49b4c31e 8180 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8181 } else {
8182 gen_add_carry(tmp, tmp, tmp2);
8183 }
7dcc1f89 8184 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8185 break;
8186 case 0x06:
e9bb4aa9 8187 if (set_cc) {
2de68a49 8188 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8189 } else {
8190 gen_sub_carry(tmp, tmp, tmp2);
8191 }
7dcc1f89 8192 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8193 break;
8194 case 0x07:
e9bb4aa9 8195 if (set_cc) {
2de68a49 8196 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8197 } else {
8198 gen_sub_carry(tmp, tmp2, tmp);
8199 }
7dcc1f89 8200 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8201 break;
8202 case 0x08:
8203 if (set_cc) {
e9bb4aa9
JR
8204 tcg_gen_and_i32(tmp, tmp, tmp2);
8205 gen_logic_CC(tmp);
9ee6e8bb 8206 }
7d1b0095 8207 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8208 break;
8209 case 0x09:
8210 if (set_cc) {
e9bb4aa9
JR
8211 tcg_gen_xor_i32(tmp, tmp, tmp2);
8212 gen_logic_CC(tmp);
9ee6e8bb 8213 }
7d1b0095 8214 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8215 break;
8216 case 0x0a:
8217 if (set_cc) {
72485ec4 8218 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8219 }
7d1b0095 8220 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8221 break;
8222 case 0x0b:
8223 if (set_cc) {
72485ec4 8224 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8225 }
7d1b0095 8226 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8227 break;
8228 case 0x0c:
e9bb4aa9
JR
8229 tcg_gen_or_i32(tmp, tmp, tmp2);
8230 if (logic_cc) {
8231 gen_logic_CC(tmp);
8232 }
7dcc1f89 8233 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8234 break;
8235 case 0x0d:
8236 if (logic_cc && rd == 15) {
8237 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8238 if (IS_USER(s)) {
9ee6e8bb 8239 goto illegal_op;
e9bb4aa9
JR
8240 }
8241 gen_exception_return(s, tmp2);
9ee6e8bb 8242 } else {
e9bb4aa9
JR
8243 if (logic_cc) {
8244 gen_logic_CC(tmp2);
8245 }
7dcc1f89 8246 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8247 }
8248 break;
8249 case 0x0e:
f669df27 8250 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8251 if (logic_cc) {
8252 gen_logic_CC(tmp);
8253 }
7dcc1f89 8254 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8255 break;
8256 default:
8257 case 0x0f:
e9bb4aa9
JR
8258 tcg_gen_not_i32(tmp2, tmp2);
8259 if (logic_cc) {
8260 gen_logic_CC(tmp2);
8261 }
7dcc1f89 8262 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8263 break;
8264 }
e9bb4aa9 8265 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8266 tcg_temp_free_i32(tmp2);
e9bb4aa9 8267 }
9ee6e8bb
PB
8268 } else {
8269 /* other instructions */
8270 op1 = (insn >> 24) & 0xf;
8271 switch(op1) {
8272 case 0x0:
8273 case 0x1:
8274 /* multiplies, extra load/stores */
8275 sh = (insn >> 5) & 3;
8276 if (sh == 0) {
8277 if (op1 == 0x0) {
8278 rd = (insn >> 16) & 0xf;
8279 rn = (insn >> 12) & 0xf;
8280 rs = (insn >> 8) & 0xf;
8281 rm = (insn) & 0xf;
8282 op1 = (insn >> 20) & 0xf;
8283 switch (op1) {
8284 case 0: case 1: case 2: case 3: case 6:
8285 /* 32 bit mul */
5e3f878a
PB
8286 tmp = load_reg(s, rs);
8287 tmp2 = load_reg(s, rm);
8288 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8289 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8290 if (insn & (1 << 22)) {
8291 /* Subtract (mls) */
8292 ARCH(6T2);
5e3f878a
PB
8293 tmp2 = load_reg(s, rn);
8294 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8295 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8296 } else if (insn & (1 << 21)) {
8297 /* Add */
5e3f878a
PB
8298 tmp2 = load_reg(s, rn);
8299 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8300 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8301 }
8302 if (insn & (1 << 20))
5e3f878a
PB
8303 gen_logic_CC(tmp);
8304 store_reg(s, rd, tmp);
9ee6e8bb 8305 break;
8aac08b1
AJ
8306 case 4:
8307 /* 64 bit mul double accumulate (UMAAL) */
8308 ARCH(6);
8309 tmp = load_reg(s, rs);
8310 tmp2 = load_reg(s, rm);
8311 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8312 gen_addq_lo(s, tmp64, rn);
8313 gen_addq_lo(s, tmp64, rd);
8314 gen_storeq_reg(s, rn, rd, tmp64);
8315 tcg_temp_free_i64(tmp64);
8316 break;
8317 case 8: case 9: case 10: case 11:
8318 case 12: case 13: case 14: case 15:
8319 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8320 tmp = load_reg(s, rs);
8321 tmp2 = load_reg(s, rm);
8aac08b1 8322 if (insn & (1 << 22)) {
c9f10124 8323 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8324 } else {
c9f10124 8325 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8326 }
8327 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8328 TCGv_i32 al = load_reg(s, rn);
8329 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8330 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8331 tcg_temp_free_i32(al);
8332 tcg_temp_free_i32(ah);
9ee6e8bb 8333 }
8aac08b1 8334 if (insn & (1 << 20)) {
c9f10124 8335 gen_logicq_cc(tmp, tmp2);
8aac08b1 8336 }
c9f10124
RH
8337 store_reg(s, rn, tmp);
8338 store_reg(s, rd, tmp2);
9ee6e8bb 8339 break;
8aac08b1
AJ
8340 default:
8341 goto illegal_op;
9ee6e8bb
PB
8342 }
8343 } else {
8344 rn = (insn >> 16) & 0xf;
8345 rd = (insn >> 12) & 0xf;
8346 if (insn & (1 << 23)) {
8347 /* load/store exclusive */
2359bf80 8348 int op2 = (insn >> 8) & 3;
86753403 8349 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8350
8351 switch (op2) {
8352 case 0: /* lda/stl */
8353 if (op1 == 1) {
8354 goto illegal_op;
8355 }
8356 ARCH(8);
8357 break;
8358 case 1: /* reserved */
8359 goto illegal_op;
8360 case 2: /* ldaex/stlex */
8361 ARCH(8);
8362 break;
8363 case 3: /* ldrex/strex */
8364 if (op1) {
8365 ARCH(6K);
8366 } else {
8367 ARCH(6);
8368 }
8369 break;
8370 }
8371
3174f8e9 8372 addr = tcg_temp_local_new_i32();
98a46317 8373 load_reg_var(s, addr, rn);
2359bf80
MR
8374
8375 /* Since the emulation does not have barriers,
8376 the acquire/release semantics need no special
8377 handling */
8378 if (op2 == 0) {
8379 if (insn & (1 << 20)) {
8380 tmp = tcg_temp_new_i32();
8381 switch (op1) {
8382 case 0: /* lda */
6ce2faf4 8383 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8384 break;
8385 case 2: /* ldab */
6ce2faf4 8386 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8387 break;
8388 case 3: /* ldah */
6ce2faf4 8389 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8390 break;
8391 default:
8392 abort();
8393 }
8394 store_reg(s, rd, tmp);
8395 } else {
8396 rm = insn & 0xf;
8397 tmp = load_reg(s, rm);
8398 switch (op1) {
8399 case 0: /* stl */
6ce2faf4 8400 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8401 break;
8402 case 2: /* stlb */
6ce2faf4 8403 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8404 break;
8405 case 3: /* stlh */
6ce2faf4 8406 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8407 break;
8408 default:
8409 abort();
8410 }
8411 tcg_temp_free_i32(tmp);
8412 }
8413 } else if (insn & (1 << 20)) {
86753403
PB
8414 switch (op1) {
8415 case 0: /* ldrex */
426f5abc 8416 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8417 break;
8418 case 1: /* ldrexd */
426f5abc 8419 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8420 break;
8421 case 2: /* ldrexb */
426f5abc 8422 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8423 break;
8424 case 3: /* ldrexh */
426f5abc 8425 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8426 break;
8427 default:
8428 abort();
8429 }
9ee6e8bb
PB
8430 } else {
8431 rm = insn & 0xf;
86753403
PB
8432 switch (op1) {
8433 case 0: /* strex */
426f5abc 8434 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8435 break;
8436 case 1: /* strexd */
502e64fe 8437 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8438 break;
8439 case 2: /* strexb */
426f5abc 8440 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8441 break;
8442 case 3: /* strexh */
426f5abc 8443 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8444 break;
8445 default:
8446 abort();
8447 }
9ee6e8bb 8448 }
39d5492a 8449 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8450 } else {
8451 /* SWP instruction */
8452 rm = (insn) & 0xf;
8453
8984bd2e
PB
8454 /* ??? This is not really atomic. However we know
8455 we never have multiple CPUs running in parallel,
8456 so it is good enough. */
8457 addr = load_reg(s, rn);
8458 tmp = load_reg(s, rm);
5a839c0d 8459 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8460 if (insn & (1 << 22)) {
6ce2faf4
EI
8461 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8462 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8463 } else {
6ce2faf4
EI
8464 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8465 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8466 }
5a839c0d 8467 tcg_temp_free_i32(tmp);
7d1b0095 8468 tcg_temp_free_i32(addr);
8984bd2e 8469 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8470 }
8471 }
8472 } else {
8473 int address_offset;
3960c336
PM
8474 bool load = insn & (1 << 20);
8475 bool doubleword = false;
9ee6e8bb
PB
8476 /* Misc load/store */
8477 rn = (insn >> 16) & 0xf;
8478 rd = (insn >> 12) & 0xf;
3960c336
PM
8479
8480 if (!load && (sh & 2)) {
8481 /* doubleword */
8482 ARCH(5TE);
8483 if (rd & 1) {
8484 /* UNPREDICTABLE; we choose to UNDEF */
8485 goto illegal_op;
8486 }
8487 load = (sh & 1) == 0;
8488 doubleword = true;
8489 }
8490
b0109805 8491 addr = load_reg(s, rn);
9ee6e8bb 8492 if (insn & (1 << 24))
b0109805 8493 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8494 address_offset = 0;
3960c336
PM
8495
8496 if (doubleword) {
8497 if (!load) {
9ee6e8bb 8498 /* store */
b0109805 8499 tmp = load_reg(s, rd);
6ce2faf4 8500 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8501 tcg_temp_free_i32(tmp);
b0109805
PB
8502 tcg_gen_addi_i32(addr, addr, 4);
8503 tmp = load_reg(s, rd + 1);
6ce2faf4 8504 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8505 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8506 } else {
8507 /* load */
5a839c0d 8508 tmp = tcg_temp_new_i32();
6ce2faf4 8509 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8510 store_reg(s, rd, tmp);
8511 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8512 tmp = tcg_temp_new_i32();
6ce2faf4 8513 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 8514 rd++;
9ee6e8bb
PB
8515 }
8516 address_offset = -4;
3960c336
PM
8517 } else if (load) {
8518 /* load */
8519 tmp = tcg_temp_new_i32();
8520 switch (sh) {
8521 case 1:
8522 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8523 break;
8524 case 2:
8525 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8526 break;
8527 default:
8528 case 3:
8529 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8530 break;
8531 }
9ee6e8bb
PB
8532 } else {
8533 /* store */
b0109805 8534 tmp = load_reg(s, rd);
6ce2faf4 8535 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8536 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8537 }
8538 /* Perform base writeback before the loaded value to
8539 ensure correct behavior with overlapping index registers.
b6af0975 8540 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8541 destination and index registers overlap. */
8542 if (!(insn & (1 << 24))) {
b0109805
PB
8543 gen_add_datah_offset(s, insn, address_offset, addr);
8544 store_reg(s, rn, addr);
9ee6e8bb
PB
8545 } else if (insn & (1 << 21)) {
8546 if (address_offset)
b0109805
PB
8547 tcg_gen_addi_i32(addr, addr, address_offset);
8548 store_reg(s, rn, addr);
8549 } else {
7d1b0095 8550 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8551 }
8552 if (load) {
8553 /* Complete the load. */
b0109805 8554 store_reg(s, rd, tmp);
9ee6e8bb
PB
8555 }
8556 }
8557 break;
8558 case 0x4:
8559 case 0x5:
8560 goto do_ldst;
8561 case 0x6:
8562 case 0x7:
8563 if (insn & (1 << 4)) {
8564 ARCH(6);
8565 /* Armv6 Media instructions. */
8566 rm = insn & 0xf;
8567 rn = (insn >> 16) & 0xf;
2c0262af 8568 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8569 rs = (insn >> 8) & 0xf;
8570 switch ((insn >> 23) & 3) {
8571 case 0: /* Parallel add/subtract. */
8572 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8573 tmp = load_reg(s, rn);
8574 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8575 sh = (insn >> 5) & 7;
8576 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8577 goto illegal_op;
6ddbc6e4 8578 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8579 tcg_temp_free_i32(tmp2);
6ddbc6e4 8580 store_reg(s, rd, tmp);
9ee6e8bb
PB
8581 break;
8582 case 1:
8583 if ((insn & 0x00700020) == 0) {
6c95676b 8584 /* Halfword pack. */
3670669c
PB
8585 tmp = load_reg(s, rn);
8586 tmp2 = load_reg(s, rm);
9ee6e8bb 8587 shift = (insn >> 7) & 0x1f;
3670669c
PB
8588 if (insn & (1 << 6)) {
8589 /* pkhtb */
22478e79
AZ
8590 if (shift == 0)
8591 shift = 31;
8592 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8593 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8594 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8595 } else {
8596 /* pkhbt */
22478e79
AZ
8597 if (shift)
8598 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8599 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8600 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8601 }
8602 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8603 tcg_temp_free_i32(tmp2);
3670669c 8604 store_reg(s, rd, tmp);
9ee6e8bb
PB
8605 } else if ((insn & 0x00200020) == 0x00200000) {
8606 /* [us]sat */
6ddbc6e4 8607 tmp = load_reg(s, rm);
9ee6e8bb
PB
8608 shift = (insn >> 7) & 0x1f;
8609 if (insn & (1 << 6)) {
8610 if (shift == 0)
8611 shift = 31;
6ddbc6e4 8612 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8613 } else {
6ddbc6e4 8614 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8615 }
8616 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8617 tmp2 = tcg_const_i32(sh);
8618 if (insn & (1 << 22))
9ef39277 8619 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8620 else
9ef39277 8621 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8622 tcg_temp_free_i32(tmp2);
6ddbc6e4 8623 store_reg(s, rd, tmp);
9ee6e8bb
PB
8624 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8625 /* [us]sat16 */
6ddbc6e4 8626 tmp = load_reg(s, rm);
9ee6e8bb 8627 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8628 tmp2 = tcg_const_i32(sh);
8629 if (insn & (1 << 22))
9ef39277 8630 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8631 else
9ef39277 8632 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8633 tcg_temp_free_i32(tmp2);
6ddbc6e4 8634 store_reg(s, rd, tmp);
9ee6e8bb
PB
8635 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8636 /* Select bytes. */
6ddbc6e4
PB
8637 tmp = load_reg(s, rn);
8638 tmp2 = load_reg(s, rm);
7d1b0095 8639 tmp3 = tcg_temp_new_i32();
0ecb72a5 8640 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8641 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8642 tcg_temp_free_i32(tmp3);
8643 tcg_temp_free_i32(tmp2);
6ddbc6e4 8644 store_reg(s, rd, tmp);
9ee6e8bb 8645 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8646 tmp = load_reg(s, rm);
9ee6e8bb 8647 shift = (insn >> 10) & 3;
1301f322 8648 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8649 rotate, a shift is sufficient. */
8650 if (shift != 0)
f669df27 8651 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8652 op1 = (insn >> 20) & 7;
8653 switch (op1) {
5e3f878a
PB
8654 case 0: gen_sxtb16(tmp); break;
8655 case 2: gen_sxtb(tmp); break;
8656 case 3: gen_sxth(tmp); break;
8657 case 4: gen_uxtb16(tmp); break;
8658 case 6: gen_uxtb(tmp); break;
8659 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8660 default: goto illegal_op;
8661 }
8662 if (rn != 15) {
5e3f878a 8663 tmp2 = load_reg(s, rn);
9ee6e8bb 8664 if ((op1 & 3) == 0) {
5e3f878a 8665 gen_add16(tmp, tmp2);
9ee6e8bb 8666 } else {
5e3f878a 8667 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8668 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8669 }
8670 }
6c95676b 8671 store_reg(s, rd, tmp);
9ee6e8bb
PB
8672 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8673 /* rev */
b0109805 8674 tmp = load_reg(s, rm);
9ee6e8bb
PB
8675 if (insn & (1 << 22)) {
8676 if (insn & (1 << 7)) {
b0109805 8677 gen_revsh(tmp);
9ee6e8bb
PB
8678 } else {
8679 ARCH(6T2);
b0109805 8680 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8681 }
8682 } else {
8683 if (insn & (1 << 7))
b0109805 8684 gen_rev16(tmp);
9ee6e8bb 8685 else
66896cb8 8686 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8687 }
b0109805 8688 store_reg(s, rd, tmp);
9ee6e8bb
PB
8689 } else {
8690 goto illegal_op;
8691 }
8692 break;
8693 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8694 switch ((insn >> 20) & 0x7) {
8695 case 5:
8696 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8697 /* op2 not 00x or 11x : UNDEF */
8698 goto illegal_op;
8699 }
838fa72d
AJ
8700 /* Signed multiply most significant [accumulate].
8701 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8702 tmp = load_reg(s, rm);
8703 tmp2 = load_reg(s, rs);
a7812ae4 8704 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8705
955a7dd5 8706 if (rd != 15) {
838fa72d 8707 tmp = load_reg(s, rd);
9ee6e8bb 8708 if (insn & (1 << 6)) {
838fa72d 8709 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8710 } else {
838fa72d 8711 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8712 }
8713 }
838fa72d
AJ
8714 if (insn & (1 << 5)) {
8715 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8716 }
8717 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8718 tmp = tcg_temp_new_i32();
ecc7b3aa 8719 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 8720 tcg_temp_free_i64(tmp64);
955a7dd5 8721 store_reg(s, rn, tmp);
41e9564d
PM
8722 break;
8723 case 0:
8724 case 4:
8725 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8726 if (insn & (1 << 7)) {
8727 goto illegal_op;
8728 }
8729 tmp = load_reg(s, rm);
8730 tmp2 = load_reg(s, rs);
9ee6e8bb 8731 if (insn & (1 << 5))
5e3f878a
PB
8732 gen_swap_half(tmp2);
8733 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8734 if (insn & (1 << 22)) {
5e3f878a 8735 /* smlald, smlsld */
33bbd75a
PC
8736 TCGv_i64 tmp64_2;
8737
a7812ae4 8738 tmp64 = tcg_temp_new_i64();
33bbd75a 8739 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8740 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8741 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8742 tcg_temp_free_i32(tmp);
33bbd75a
PC
8743 tcg_temp_free_i32(tmp2);
8744 if (insn & (1 << 6)) {
8745 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8746 } else {
8747 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8748 }
8749 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8750 gen_addq(s, tmp64, rd, rn);
8751 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8752 tcg_temp_free_i64(tmp64);
9ee6e8bb 8753 } else {
5e3f878a 8754 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8755 if (insn & (1 << 6)) {
8756 /* This subtraction cannot overflow. */
8757 tcg_gen_sub_i32(tmp, tmp, tmp2);
8758 } else {
8759 /* This addition cannot overflow 32 bits;
8760 * however it may overflow considered as a
8761 * signed operation, in which case we must set
8762 * the Q flag.
8763 */
8764 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8765 }
8766 tcg_temp_free_i32(tmp2);
22478e79 8767 if (rd != 15)
9ee6e8bb 8768 {
22478e79 8769 tmp2 = load_reg(s, rd);
9ef39277 8770 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8771 tcg_temp_free_i32(tmp2);
9ee6e8bb 8772 }
22478e79 8773 store_reg(s, rn, tmp);
9ee6e8bb 8774 }
41e9564d 8775 break;
b8b8ea05
PM
8776 case 1:
8777 case 3:
8778 /* SDIV, UDIV */
d614a513 8779 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
8780 goto illegal_op;
8781 }
8782 if (((insn >> 5) & 7) || (rd != 15)) {
8783 goto illegal_op;
8784 }
8785 tmp = load_reg(s, rm);
8786 tmp2 = load_reg(s, rs);
8787 if (insn & (1 << 21)) {
8788 gen_helper_udiv(tmp, tmp, tmp2);
8789 } else {
8790 gen_helper_sdiv(tmp, tmp, tmp2);
8791 }
8792 tcg_temp_free_i32(tmp2);
8793 store_reg(s, rn, tmp);
8794 break;
41e9564d
PM
8795 default:
8796 goto illegal_op;
9ee6e8bb
PB
8797 }
8798 break;
8799 case 3:
8800 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8801 switch (op1) {
8802 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8803 ARCH(6);
8804 tmp = load_reg(s, rm);
8805 tmp2 = load_reg(s, rs);
8806 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8807 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8808 if (rd != 15) {
8809 tmp2 = load_reg(s, rd);
6ddbc6e4 8810 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8811 tcg_temp_free_i32(tmp2);
9ee6e8bb 8812 }
ded9d295 8813 store_reg(s, rn, tmp);
9ee6e8bb
PB
8814 break;
8815 case 0x20: case 0x24: case 0x28: case 0x2c:
8816 /* Bitfield insert/clear. */
8817 ARCH(6T2);
8818 shift = (insn >> 7) & 0x1f;
8819 i = (insn >> 16) & 0x1f;
45140a57
KB
8820 if (i < shift) {
8821 /* UNPREDICTABLE; we choose to UNDEF */
8822 goto illegal_op;
8823 }
9ee6e8bb
PB
8824 i = i + 1 - shift;
8825 if (rm == 15) {
7d1b0095 8826 tmp = tcg_temp_new_i32();
5e3f878a 8827 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8828 } else {
5e3f878a 8829 tmp = load_reg(s, rm);
9ee6e8bb
PB
8830 }
8831 if (i != 32) {
5e3f878a 8832 tmp2 = load_reg(s, rd);
d593c48e 8833 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8834 tcg_temp_free_i32(tmp2);
9ee6e8bb 8835 }
5e3f878a 8836 store_reg(s, rd, tmp);
9ee6e8bb
PB
8837 break;
8838 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8839 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8840 ARCH(6T2);
5e3f878a 8841 tmp = load_reg(s, rm);
9ee6e8bb
PB
8842 shift = (insn >> 7) & 0x1f;
8843 i = ((insn >> 16) & 0x1f) + 1;
8844 if (shift + i > 32)
8845 goto illegal_op;
8846 if (i < 32) {
8847 if (op1 & 0x20) {
5e3f878a 8848 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8849 } else {
5e3f878a 8850 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8851 }
8852 }
5e3f878a 8853 store_reg(s, rd, tmp);
9ee6e8bb
PB
8854 break;
8855 default:
8856 goto illegal_op;
8857 }
8858 break;
8859 }
8860 break;
8861 }
8862 do_ldst:
8863 /* Check for undefined extension instructions
8864 * per the ARM Bible IE:
8865 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8866 */
8867 sh = (0xf << 20) | (0xf << 4);
8868 if (op1 == 0x7 && ((insn & sh) == sh))
8869 {
8870 goto illegal_op;
8871 }
8872 /* load/store byte/word */
8873 rn = (insn >> 16) & 0xf;
8874 rd = (insn >> 12) & 0xf;
b0109805 8875 tmp2 = load_reg(s, rn);
a99caa48
PM
8876 if ((insn & 0x01200000) == 0x00200000) {
8877 /* ldrt/strt */
579d21cc 8878 i = get_a32_user_mem_index(s);
a99caa48
PM
8879 } else {
8880 i = get_mem_index(s);
8881 }
9ee6e8bb 8882 if (insn & (1 << 24))
b0109805 8883 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8884 if (insn & (1 << 20)) {
8885 /* load */
5a839c0d 8886 tmp = tcg_temp_new_i32();
9ee6e8bb 8887 if (insn & (1 << 22)) {
08307563 8888 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8889 } else {
08307563 8890 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8891 }
9ee6e8bb
PB
8892 } else {
8893 /* store */
b0109805 8894 tmp = load_reg(s, rd);
5a839c0d 8895 if (insn & (1 << 22)) {
08307563 8896 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8897 } else {
08307563 8898 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8899 }
8900 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8901 }
8902 if (!(insn & (1 << 24))) {
b0109805
PB
8903 gen_add_data_offset(s, insn, tmp2);
8904 store_reg(s, rn, tmp2);
8905 } else if (insn & (1 << 21)) {
8906 store_reg(s, rn, tmp2);
8907 } else {
7d1b0095 8908 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8909 }
8910 if (insn & (1 << 20)) {
8911 /* Complete the load. */
7dcc1f89 8912 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
8913 }
8914 break;
8915 case 0x08:
8916 case 0x09:
8917 {
da3e53dd
PM
8918 int j, n, loaded_base;
8919 bool exc_return = false;
8920 bool is_load = extract32(insn, 20, 1);
8921 bool user = false;
39d5492a 8922 TCGv_i32 loaded_var;
9ee6e8bb
PB
8923 /* load/store multiple words */
8924 /* XXX: store correct base if write back */
9ee6e8bb 8925 if (insn & (1 << 22)) {
da3e53dd 8926 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
8927 if (IS_USER(s))
8928 goto illegal_op; /* only usable in supervisor mode */
8929
da3e53dd
PM
8930 if (is_load && extract32(insn, 15, 1)) {
8931 exc_return = true;
8932 } else {
8933 user = true;
8934 }
9ee6e8bb
PB
8935 }
8936 rn = (insn >> 16) & 0xf;
b0109805 8937 addr = load_reg(s, rn);
9ee6e8bb
PB
8938
8939 /* compute total size */
8940 loaded_base = 0;
39d5492a 8941 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8942 n = 0;
8943 for(i=0;i<16;i++) {
8944 if (insn & (1 << i))
8945 n++;
8946 }
8947 /* XXX: test invalid n == 0 case ? */
8948 if (insn & (1 << 23)) {
8949 if (insn & (1 << 24)) {
8950 /* pre increment */
b0109805 8951 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8952 } else {
8953 /* post increment */
8954 }
8955 } else {
8956 if (insn & (1 << 24)) {
8957 /* pre decrement */
b0109805 8958 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8959 } else {
8960 /* post decrement */
8961 if (n != 1)
b0109805 8962 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8963 }
8964 }
8965 j = 0;
8966 for(i=0;i<16;i++) {
8967 if (insn & (1 << i)) {
da3e53dd 8968 if (is_load) {
9ee6e8bb 8969 /* load */
5a839c0d 8970 tmp = tcg_temp_new_i32();
6ce2faf4 8971 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8972 if (user) {
b75263d6 8973 tmp2 = tcg_const_i32(i);
1ce94f81 8974 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8975 tcg_temp_free_i32(tmp2);
7d1b0095 8976 tcg_temp_free_i32(tmp);
9ee6e8bb 8977 } else if (i == rn) {
b0109805 8978 loaded_var = tmp;
9ee6e8bb
PB
8979 loaded_base = 1;
8980 } else {
7dcc1f89 8981 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
8982 }
8983 } else {
8984 /* store */
8985 if (i == 15) {
8986 /* special case: r15 = PC + 8 */
8987 val = (long)s->pc + 4;
7d1b0095 8988 tmp = tcg_temp_new_i32();
b0109805 8989 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8990 } else if (user) {
7d1b0095 8991 tmp = tcg_temp_new_i32();
b75263d6 8992 tmp2 = tcg_const_i32(i);
9ef39277 8993 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8994 tcg_temp_free_i32(tmp2);
9ee6e8bb 8995 } else {
b0109805 8996 tmp = load_reg(s, i);
9ee6e8bb 8997 }
6ce2faf4 8998 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8999 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9000 }
9001 j++;
9002 /* no need to add after the last transfer */
9003 if (j != n)
b0109805 9004 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9005 }
9006 }
9007 if (insn & (1 << 21)) {
9008 /* write back */
9009 if (insn & (1 << 23)) {
9010 if (insn & (1 << 24)) {
9011 /* pre increment */
9012 } else {
9013 /* post increment */
b0109805 9014 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9015 }
9016 } else {
9017 if (insn & (1 << 24)) {
9018 /* pre decrement */
9019 if (n != 1)
b0109805 9020 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9021 } else {
9022 /* post decrement */
b0109805 9023 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9024 }
9025 }
b0109805
PB
9026 store_reg(s, rn, addr);
9027 } else {
7d1b0095 9028 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9029 }
9030 if (loaded_base) {
b0109805 9031 store_reg(s, rn, loaded_var);
9ee6e8bb 9032 }
da3e53dd 9033 if (exc_return) {
9ee6e8bb 9034 /* Restore CPSR from SPSR. */
d9ba4830 9035 tmp = load_cpu_field(spsr);
4051e12c 9036 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 9037 tcg_temp_free_i32(tmp);
577bf808 9038 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9039 }
9040 }
9041 break;
9042 case 0xa:
9043 case 0xb:
9044 {
9045 int32_t offset;
9046
9047 /* branch (and link) */
9048 val = (int32_t)s->pc;
9049 if (insn & (1 << 24)) {
7d1b0095 9050 tmp = tcg_temp_new_i32();
5e3f878a
PB
9051 tcg_gen_movi_i32(tmp, val);
9052 store_reg(s, 14, tmp);
9ee6e8bb 9053 }
534df156
PM
9054 offset = sextract32(insn << 2, 0, 26);
9055 val += offset + 4;
9ee6e8bb
PB
9056 gen_jmp(s, val);
9057 }
9058 break;
9059 case 0xc:
9060 case 0xd:
9061 case 0xe:
6a57f3eb
WN
9062 if (((insn >> 8) & 0xe) == 10) {
9063 /* VFP. */
7dcc1f89 9064 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9065 goto illegal_op;
9066 }
7dcc1f89 9067 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9068 /* Coprocessor. */
9ee6e8bb 9069 goto illegal_op;
6a57f3eb 9070 }
9ee6e8bb
PB
9071 break;
9072 case 0xf:
9073 /* swi */
eaed129d 9074 gen_set_pc_im(s, s->pc);
d4a2dc67 9075 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9076 s->is_jmp = DISAS_SWI;
9077 break;
9078 default:
9079 illegal_op:
73710361
GB
9080 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9081 default_exception_el(s));
9ee6e8bb
PB
9082 break;
9083 }
9084 }
9085}
9086
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcode slots 0..7 hold the logical group (AND, BIC, ORR, ORN,
     * EOR, ...); everything at 8 and above is arithmetic.
     */
    return (op < 8) ? 1 : 0;
}
9093
9094/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9095 then set condition code flags based on the result of the operation.
9096 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9097 to the high bit of T1.
9098 Returns zero if the opcode is valid. */
9099
9100static int
39d5492a
PM
9101gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9102 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9103{
9104 int logic_cc;
9105
9106 logic_cc = 0;
9107 switch (op) {
9108 case 0: /* and */
396e467c 9109 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9110 logic_cc = conds;
9111 break;
9112 case 1: /* bic */
f669df27 9113 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9114 logic_cc = conds;
9115 break;
9116 case 2: /* orr */
396e467c 9117 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9118 logic_cc = conds;
9119 break;
9120 case 3: /* orn */
29501f1b 9121 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9122 logic_cc = conds;
9123 break;
9124 case 4: /* eor */
396e467c 9125 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9126 logic_cc = conds;
9127 break;
9128 case 8: /* add */
9129 if (conds)
72485ec4 9130 gen_add_CC(t0, t0, t1);
9ee6e8bb 9131 else
396e467c 9132 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9133 break;
9134 case 10: /* adc */
9135 if (conds)
49b4c31e 9136 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9137 else
396e467c 9138 gen_adc(t0, t1);
9ee6e8bb
PB
9139 break;
9140 case 11: /* sbc */
2de68a49
RH
9141 if (conds) {
9142 gen_sbc_CC(t0, t0, t1);
9143 } else {
396e467c 9144 gen_sub_carry(t0, t0, t1);
2de68a49 9145 }
9ee6e8bb
PB
9146 break;
9147 case 13: /* sub */
9148 if (conds)
72485ec4 9149 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9150 else
396e467c 9151 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9152 break;
9153 case 14: /* rsb */
9154 if (conds)
72485ec4 9155 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9156 else
396e467c 9157 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9158 break;
9159 default: /* 5, 6, 7, 9, 12, 15. */
9160 return 1;
9161 }
9162 if (logic_cc) {
396e467c 9163 gen_logic_CC(t0);
9ee6e8bb 9164 if (shifter_out)
396e467c 9165 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9166 }
9167 return 0;
9168}
9169
9170/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9171 is not legal. */
0ecb72a5 9172static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9173{
b0109805 9174 uint32_t insn, imm, shift, offset;
9ee6e8bb 9175 uint32_t rd, rn, rm, rs;
39d5492a
PM
9176 TCGv_i32 tmp;
9177 TCGv_i32 tmp2;
9178 TCGv_i32 tmp3;
9179 TCGv_i32 addr;
a7812ae4 9180 TCGv_i64 tmp64;
9ee6e8bb
PB
9181 int op;
9182 int shiftop;
9183 int conds;
9184 int logic_cc;
9185
d614a513
PM
9186 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9187 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9188 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9189 16-bit instructions to get correct prefetch abort behavior. */
9190 insn = insn_hw1;
9191 if ((insn & (1 << 12)) == 0) {
be5e7a76 9192 ARCH(5);
9ee6e8bb
PB
9193 /* Second half of blx. */
9194 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9195 tmp = load_reg(s, 14);
9196 tcg_gen_addi_i32(tmp, tmp, offset);
9197 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9198
7d1b0095 9199 tmp2 = tcg_temp_new_i32();
b0109805 9200 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9201 store_reg(s, 14, tmp2);
9202 gen_bx(s, tmp);
9ee6e8bb
PB
9203 return 0;
9204 }
9205 if (insn & (1 << 11)) {
9206 /* Second half of bl. */
9207 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9208 tmp = load_reg(s, 14);
6a0d8a1d 9209 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9210
7d1b0095 9211 tmp2 = tcg_temp_new_i32();
b0109805 9212 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9213 store_reg(s, 14, tmp2);
9214 gen_bx(s, tmp);
9ee6e8bb
PB
9215 return 0;
9216 }
9217 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9218 /* Instruction spans a page boundary. Implement it as two
9219 16-bit instructions in case the second half causes an
9220 prefetch abort. */
9221 offset = ((int32_t)insn << 21) >> 9;
396e467c 9222 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9223 return 0;
9224 }
9225 /* Fall through to 32-bit decode. */
9226 }
9227
d31dd73e 9228 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9229 s->pc += 2;
9230 insn |= (uint32_t)insn_hw1 << 16;
9231
9232 if ((insn & 0xf800e800) != 0xf000e800) {
9233 ARCH(6T2);
9234 }
9235
9236 rn = (insn >> 16) & 0xf;
9237 rs = (insn >> 12) & 0xf;
9238 rd = (insn >> 8) & 0xf;
9239 rm = insn & 0xf;
9240 switch ((insn >> 25) & 0xf) {
9241 case 0: case 1: case 2: case 3:
9242 /* 16-bit instructions. Should never happen. */
9243 abort();
9244 case 4:
9245 if (insn & (1 << 22)) {
9246 /* Other load/store, table branch. */
9247 if (insn & 0x01200000) {
9248 /* Load/store doubleword. */
9249 if (rn == 15) {
7d1b0095 9250 addr = tcg_temp_new_i32();
b0109805 9251 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9252 } else {
b0109805 9253 addr = load_reg(s, rn);
9ee6e8bb
PB
9254 }
9255 offset = (insn & 0xff) * 4;
9256 if ((insn & (1 << 23)) == 0)
9257 offset = -offset;
9258 if (insn & (1 << 24)) {
b0109805 9259 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9260 offset = 0;
9261 }
9262 if (insn & (1 << 20)) {
9263 /* ldrd */
e2592fad 9264 tmp = tcg_temp_new_i32();
6ce2faf4 9265 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9266 store_reg(s, rs, tmp);
9267 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9268 tmp = tcg_temp_new_i32();
6ce2faf4 9269 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9270 store_reg(s, rd, tmp);
9ee6e8bb
PB
9271 } else {
9272 /* strd */
b0109805 9273 tmp = load_reg(s, rs);
6ce2faf4 9274 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9275 tcg_temp_free_i32(tmp);
b0109805
PB
9276 tcg_gen_addi_i32(addr, addr, 4);
9277 tmp = load_reg(s, rd);
6ce2faf4 9278 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9279 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9280 }
9281 if (insn & (1 << 21)) {
9282 /* Base writeback. */
9283 if (rn == 15)
9284 goto illegal_op;
b0109805
PB
9285 tcg_gen_addi_i32(addr, addr, offset - 4);
9286 store_reg(s, rn, addr);
9287 } else {
7d1b0095 9288 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9289 }
9290 } else if ((insn & (1 << 23)) == 0) {
9291 /* Load/store exclusive word. */
39d5492a 9292 addr = tcg_temp_local_new_i32();
98a46317 9293 load_reg_var(s, addr, rn);
426f5abc 9294 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9295 if (insn & (1 << 20)) {
426f5abc 9296 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9297 } else {
426f5abc 9298 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9299 }
39d5492a 9300 tcg_temp_free_i32(addr);
2359bf80 9301 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9302 /* Table Branch. */
9303 if (rn == 15) {
7d1b0095 9304 addr = tcg_temp_new_i32();
b0109805 9305 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9306 } else {
b0109805 9307 addr = load_reg(s, rn);
9ee6e8bb 9308 }
b26eefb6 9309 tmp = load_reg(s, rm);
b0109805 9310 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9311 if (insn & (1 << 4)) {
9312 /* tbh */
b0109805 9313 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9314 tcg_temp_free_i32(tmp);
e2592fad 9315 tmp = tcg_temp_new_i32();
6ce2faf4 9316 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9317 } else { /* tbb */
7d1b0095 9318 tcg_temp_free_i32(tmp);
e2592fad 9319 tmp = tcg_temp_new_i32();
6ce2faf4 9320 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9321 }
7d1b0095 9322 tcg_temp_free_i32(addr);
b0109805
PB
9323 tcg_gen_shli_i32(tmp, tmp, 1);
9324 tcg_gen_addi_i32(tmp, tmp, s->pc);
9325 store_reg(s, 15, tmp);
9ee6e8bb 9326 } else {
2359bf80 9327 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9328 op = (insn >> 4) & 0x3;
2359bf80
MR
9329 switch (op2) {
9330 case 0:
426f5abc 9331 goto illegal_op;
2359bf80
MR
9332 case 1:
9333 /* Load/store exclusive byte/halfword/doubleword */
9334 if (op == 2) {
9335 goto illegal_op;
9336 }
9337 ARCH(7);
9338 break;
9339 case 2:
9340 /* Load-acquire/store-release */
9341 if (op == 3) {
9342 goto illegal_op;
9343 }
9344 /* Fall through */
9345 case 3:
9346 /* Load-acquire/store-release exclusive */
9347 ARCH(8);
9348 break;
426f5abc 9349 }
39d5492a 9350 addr = tcg_temp_local_new_i32();
98a46317 9351 load_reg_var(s, addr, rn);
2359bf80
MR
9352 if (!(op2 & 1)) {
9353 if (insn & (1 << 20)) {
9354 tmp = tcg_temp_new_i32();
9355 switch (op) {
9356 case 0: /* ldab */
6ce2faf4 9357 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9358 break;
9359 case 1: /* ldah */
6ce2faf4 9360 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9361 break;
9362 case 2: /* lda */
6ce2faf4 9363 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9364 break;
9365 default:
9366 abort();
9367 }
9368 store_reg(s, rs, tmp);
9369 } else {
9370 tmp = load_reg(s, rs);
9371 switch (op) {
9372 case 0: /* stlb */
6ce2faf4 9373 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9374 break;
9375 case 1: /* stlh */
6ce2faf4 9376 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9377 break;
9378 case 2: /* stl */
6ce2faf4 9379 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9380 break;
9381 default:
9382 abort();
9383 }
9384 tcg_temp_free_i32(tmp);
9385 }
9386 } else if (insn & (1 << 20)) {
426f5abc 9387 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9388 } else {
426f5abc 9389 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9390 }
39d5492a 9391 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9392 }
9393 } else {
9394 /* Load/store multiple, RFE, SRS. */
9395 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9396 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9397 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9398 goto illegal_op;
00115976 9399 }
9ee6e8bb
PB
9400 if (insn & (1 << 20)) {
9401 /* rfe */
b0109805
PB
9402 addr = load_reg(s, rn);
9403 if ((insn & (1 << 24)) == 0)
9404 tcg_gen_addi_i32(addr, addr, -8);
9405 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9406 tmp = tcg_temp_new_i32();
6ce2faf4 9407 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9408 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9409 tmp2 = tcg_temp_new_i32();
6ce2faf4 9410 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9411 if (insn & (1 << 21)) {
9412 /* Base writeback. */
b0109805
PB
9413 if (insn & (1 << 24)) {
9414 tcg_gen_addi_i32(addr, addr, 4);
9415 } else {
9416 tcg_gen_addi_i32(addr, addr, -4);
9417 }
9418 store_reg(s, rn, addr);
9419 } else {
7d1b0095 9420 tcg_temp_free_i32(addr);
9ee6e8bb 9421 }
b0109805 9422 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9423 } else {
9424 /* srs */
81465888
PM
9425 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9426 insn & (1 << 21));
9ee6e8bb
PB
9427 }
9428 } else {
5856d44e 9429 int i, loaded_base = 0;
39d5492a 9430 TCGv_i32 loaded_var;
9ee6e8bb 9431 /* Load/store multiple. */
b0109805 9432 addr = load_reg(s, rn);
9ee6e8bb
PB
9433 offset = 0;
9434 for (i = 0; i < 16; i++) {
9435 if (insn & (1 << i))
9436 offset += 4;
9437 }
9438 if (insn & (1 << 24)) {
b0109805 9439 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9440 }
9441
39d5492a 9442 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9443 for (i = 0; i < 16; i++) {
9444 if ((insn & (1 << i)) == 0)
9445 continue;
9446 if (insn & (1 << 20)) {
9447 /* Load. */
e2592fad 9448 tmp = tcg_temp_new_i32();
6ce2faf4 9449 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9450 if (i == 15) {
b0109805 9451 gen_bx(s, tmp);
5856d44e
YO
9452 } else if (i == rn) {
9453 loaded_var = tmp;
9454 loaded_base = 1;
9ee6e8bb 9455 } else {
b0109805 9456 store_reg(s, i, tmp);
9ee6e8bb
PB
9457 }
9458 } else {
9459 /* Store. */
b0109805 9460 tmp = load_reg(s, i);
6ce2faf4 9461 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9462 tcg_temp_free_i32(tmp);
9ee6e8bb 9463 }
b0109805 9464 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9465 }
5856d44e
YO
9466 if (loaded_base) {
9467 store_reg(s, rn, loaded_var);
9468 }
9ee6e8bb
PB
9469 if (insn & (1 << 21)) {
9470 /* Base register writeback. */
9471 if (insn & (1 << 24)) {
b0109805 9472 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9473 }
9474 /* Fault if writeback register is in register list. */
9475 if (insn & (1 << rn))
9476 goto illegal_op;
b0109805
PB
9477 store_reg(s, rn, addr);
9478 } else {
7d1b0095 9479 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9480 }
9481 }
9482 }
9483 break;
2af9ab77
JB
9484 case 5:
9485
9ee6e8bb 9486 op = (insn >> 21) & 0xf;
2af9ab77 9487 if (op == 6) {
62b44f05
AR
9488 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9489 goto illegal_op;
9490 }
2af9ab77
JB
9491 /* Halfword pack. */
9492 tmp = load_reg(s, rn);
9493 tmp2 = load_reg(s, rm);
9494 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9495 if (insn & (1 << 5)) {
9496 /* pkhtb */
9497 if (shift == 0)
9498 shift = 31;
9499 tcg_gen_sari_i32(tmp2, tmp2, shift);
9500 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9501 tcg_gen_ext16u_i32(tmp2, tmp2);
9502 } else {
9503 /* pkhbt */
9504 if (shift)
9505 tcg_gen_shli_i32(tmp2, tmp2, shift);
9506 tcg_gen_ext16u_i32(tmp, tmp);
9507 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9508 }
9509 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9510 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9511 store_reg(s, rd, tmp);
9512 } else {
2af9ab77
JB
9513 /* Data processing register constant shift. */
9514 if (rn == 15) {
7d1b0095 9515 tmp = tcg_temp_new_i32();
2af9ab77
JB
9516 tcg_gen_movi_i32(tmp, 0);
9517 } else {
9518 tmp = load_reg(s, rn);
9519 }
9520 tmp2 = load_reg(s, rm);
9521
9522 shiftop = (insn >> 4) & 3;
9523 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9524 conds = (insn & (1 << 20)) != 0;
9525 logic_cc = (conds && thumb2_logic_op(op));
9526 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9527 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9528 goto illegal_op;
7d1b0095 9529 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9530 if (rd != 15) {
9531 store_reg(s, rd, tmp);
9532 } else {
7d1b0095 9533 tcg_temp_free_i32(tmp);
2af9ab77 9534 }
3174f8e9 9535 }
9ee6e8bb
PB
9536 break;
9537 case 13: /* Misc data processing. */
9538 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9539 if (op < 4 && (insn & 0xf000) != 0xf000)
9540 goto illegal_op;
9541 switch (op) {
9542 case 0: /* Register controlled shift. */
8984bd2e
PB
9543 tmp = load_reg(s, rn);
9544 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9545 if ((insn & 0x70) != 0)
9546 goto illegal_op;
9547 op = (insn >> 21) & 3;
8984bd2e
PB
9548 logic_cc = (insn & (1 << 20)) != 0;
9549 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9550 if (logic_cc)
9551 gen_logic_CC(tmp);
7dcc1f89 9552 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9553 break;
9554 case 1: /* Sign/zero extend. */
62b44f05
AR
9555 op = (insn >> 20) & 7;
9556 switch (op) {
9557 case 0: /* SXTAH, SXTH */
9558 case 1: /* UXTAH, UXTH */
9559 case 4: /* SXTAB, SXTB */
9560 case 5: /* UXTAB, UXTB */
9561 break;
9562 case 2: /* SXTAB16, SXTB16 */
9563 case 3: /* UXTAB16, UXTB16 */
9564 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9565 goto illegal_op;
9566 }
9567 break;
9568 default:
9569 goto illegal_op;
9570 }
9571 if (rn != 15) {
9572 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9573 goto illegal_op;
9574 }
9575 }
5e3f878a 9576 tmp = load_reg(s, rm);
9ee6e8bb 9577 shift = (insn >> 4) & 3;
1301f322 9578 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9579 rotate, a shift is sufficient. */
9580 if (shift != 0)
f669df27 9581 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9582 op = (insn >> 20) & 7;
9583 switch (op) {
5e3f878a
PB
9584 case 0: gen_sxth(tmp); break;
9585 case 1: gen_uxth(tmp); break;
9586 case 2: gen_sxtb16(tmp); break;
9587 case 3: gen_uxtb16(tmp); break;
9588 case 4: gen_sxtb(tmp); break;
9589 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9590 default:
9591 g_assert_not_reached();
9ee6e8bb
PB
9592 }
9593 if (rn != 15) {
5e3f878a 9594 tmp2 = load_reg(s, rn);
9ee6e8bb 9595 if ((op >> 1) == 1) {
5e3f878a 9596 gen_add16(tmp, tmp2);
9ee6e8bb 9597 } else {
5e3f878a 9598 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9599 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9600 }
9601 }
5e3f878a 9602 store_reg(s, rd, tmp);
9ee6e8bb
PB
9603 break;
9604 case 2: /* SIMD add/subtract. */
62b44f05
AR
9605 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9606 goto illegal_op;
9607 }
9ee6e8bb
PB
9608 op = (insn >> 20) & 7;
9609 shift = (insn >> 4) & 7;
9610 if ((op & 3) == 3 || (shift & 3) == 3)
9611 goto illegal_op;
6ddbc6e4
PB
9612 tmp = load_reg(s, rn);
9613 tmp2 = load_reg(s, rm);
9614 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9615 tcg_temp_free_i32(tmp2);
6ddbc6e4 9616 store_reg(s, rd, tmp);
9ee6e8bb
PB
9617 break;
9618 case 3: /* Other data processing. */
9619 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9620 if (op < 4) {
9621 /* Saturating add/subtract. */
62b44f05
AR
9622 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9623 goto illegal_op;
9624 }
d9ba4830
PB
9625 tmp = load_reg(s, rn);
9626 tmp2 = load_reg(s, rm);
9ee6e8bb 9627 if (op & 1)
9ef39277 9628 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9629 if (op & 2)
9ef39277 9630 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9631 else
9ef39277 9632 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9633 tcg_temp_free_i32(tmp2);
9ee6e8bb 9634 } else {
62b44f05
AR
9635 switch (op) {
9636 case 0x0a: /* rbit */
9637 case 0x08: /* rev */
9638 case 0x09: /* rev16 */
9639 case 0x0b: /* revsh */
9640 case 0x18: /* clz */
9641 break;
9642 case 0x10: /* sel */
9643 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9644 goto illegal_op;
9645 }
9646 break;
9647 case 0x20: /* crc32/crc32c */
9648 case 0x21:
9649 case 0x22:
9650 case 0x28:
9651 case 0x29:
9652 case 0x2a:
9653 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9654 goto illegal_op;
9655 }
9656 break;
9657 default:
9658 goto illegal_op;
9659 }
d9ba4830 9660 tmp = load_reg(s, rn);
9ee6e8bb
PB
9661 switch (op) {
9662 case 0x0a: /* rbit */
d9ba4830 9663 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9664 break;
9665 case 0x08: /* rev */
66896cb8 9666 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9667 break;
9668 case 0x09: /* rev16 */
d9ba4830 9669 gen_rev16(tmp);
9ee6e8bb
PB
9670 break;
9671 case 0x0b: /* revsh */
d9ba4830 9672 gen_revsh(tmp);
9ee6e8bb
PB
9673 break;
9674 case 0x10: /* sel */
d9ba4830 9675 tmp2 = load_reg(s, rm);
7d1b0095 9676 tmp3 = tcg_temp_new_i32();
0ecb72a5 9677 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9678 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9679 tcg_temp_free_i32(tmp3);
9680 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9681 break;
9682 case 0x18: /* clz */
d9ba4830 9683 gen_helper_clz(tmp, tmp);
9ee6e8bb 9684 break;
eb0ecd5a
WN
9685 case 0x20:
9686 case 0x21:
9687 case 0x22:
9688 case 0x28:
9689 case 0x29:
9690 case 0x2a:
9691 {
9692 /* crc32/crc32c */
9693 uint32_t sz = op & 0x3;
9694 uint32_t c = op & 0x8;
9695
eb0ecd5a 9696 tmp2 = load_reg(s, rm);
aa633469
PM
9697 if (sz == 0) {
9698 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9699 } else if (sz == 1) {
9700 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9701 }
eb0ecd5a
WN
9702 tmp3 = tcg_const_i32(1 << sz);
9703 if (c) {
9704 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9705 } else {
9706 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9707 }
9708 tcg_temp_free_i32(tmp2);
9709 tcg_temp_free_i32(tmp3);
9710 break;
9711 }
9ee6e8bb 9712 default:
62b44f05 9713 g_assert_not_reached();
9ee6e8bb
PB
9714 }
9715 }
d9ba4830 9716 store_reg(s, rd, tmp);
9ee6e8bb
PB
9717 break;
9718 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
9719 switch ((insn >> 20) & 7) {
9720 case 0: /* 32 x 32 -> 32 */
9721 case 7: /* Unsigned sum of absolute differences. */
9722 break;
9723 case 1: /* 16 x 16 -> 32 */
9724 case 2: /* Dual multiply add. */
9725 case 3: /* 32 * 16 -> 32msb */
9726 case 4: /* Dual multiply subtract. */
9727 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9728 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9729 goto illegal_op;
9730 }
9731 break;
9732 }
9ee6e8bb 9733 op = (insn >> 4) & 0xf;
d9ba4830
PB
9734 tmp = load_reg(s, rn);
9735 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9736 switch ((insn >> 20) & 7) {
9737 case 0: /* 32 x 32 -> 32 */
d9ba4830 9738 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9739 tcg_temp_free_i32(tmp2);
9ee6e8bb 9740 if (rs != 15) {
d9ba4830 9741 tmp2 = load_reg(s, rs);
9ee6e8bb 9742 if (op)
d9ba4830 9743 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9744 else
d9ba4830 9745 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9746 tcg_temp_free_i32(tmp2);
9ee6e8bb 9747 }
9ee6e8bb
PB
9748 break;
9749 case 1: /* 16 x 16 -> 32 */
d9ba4830 9750 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9751 tcg_temp_free_i32(tmp2);
9ee6e8bb 9752 if (rs != 15) {
d9ba4830 9753 tmp2 = load_reg(s, rs);
9ef39277 9754 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9755 tcg_temp_free_i32(tmp2);
9ee6e8bb 9756 }
9ee6e8bb
PB
9757 break;
9758 case 2: /* Dual multiply add. */
9759 case 4: /* Dual multiply subtract. */
9760 if (op)
d9ba4830
PB
9761 gen_swap_half(tmp2);
9762 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9763 if (insn & (1 << 22)) {
e1d177b9 9764 /* This subtraction cannot overflow. */
d9ba4830 9765 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9766 } else {
e1d177b9
PM
9767 /* This addition cannot overflow 32 bits;
9768 * however it may overflow considered as a signed
9769 * operation, in which case we must set the Q flag.
9770 */
9ef39277 9771 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9772 }
7d1b0095 9773 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9774 if (rs != 15)
9775 {
d9ba4830 9776 tmp2 = load_reg(s, rs);
9ef39277 9777 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9778 tcg_temp_free_i32(tmp2);
9ee6e8bb 9779 }
9ee6e8bb
PB
9780 break;
9781 case 3: /* 32 * 16 -> 32msb */
9782 if (op)
d9ba4830 9783 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9784 else
d9ba4830 9785 gen_sxth(tmp2);
a7812ae4
PB
9786 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9787 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9788 tmp = tcg_temp_new_i32();
ecc7b3aa 9789 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9790 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9791 if (rs != 15)
9792 {
d9ba4830 9793 tmp2 = load_reg(s, rs);
9ef39277 9794 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9795 tcg_temp_free_i32(tmp2);
9ee6e8bb 9796 }
9ee6e8bb 9797 break;
838fa72d
AJ
9798 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9799 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9800 if (rs != 15) {
838fa72d
AJ
9801 tmp = load_reg(s, rs);
9802 if (insn & (1 << 20)) {
9803 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9804 } else {
838fa72d 9805 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9806 }
2c0262af 9807 }
838fa72d
AJ
9808 if (insn & (1 << 4)) {
9809 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9810 }
9811 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9812 tmp = tcg_temp_new_i32();
ecc7b3aa 9813 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9814 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9815 break;
9816 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9817 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9818 tcg_temp_free_i32(tmp2);
9ee6e8bb 9819 if (rs != 15) {
d9ba4830
PB
9820 tmp2 = load_reg(s, rs);
9821 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9822 tcg_temp_free_i32(tmp2);
5fd46862 9823 }
9ee6e8bb 9824 break;
2c0262af 9825 }
d9ba4830 9826 store_reg(s, rd, tmp);
2c0262af 9827 break;
9ee6e8bb
PB
9828 case 6: case 7: /* 64-bit multiply, Divide. */
9829 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9830 tmp = load_reg(s, rn);
9831 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9832 if ((op & 0x50) == 0x10) {
9833 /* sdiv, udiv */
d614a513 9834 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9835 goto illegal_op;
47789990 9836 }
9ee6e8bb 9837 if (op & 0x20)
5e3f878a 9838 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9839 else
5e3f878a 9840 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9841 tcg_temp_free_i32(tmp2);
5e3f878a 9842 store_reg(s, rd, tmp);
9ee6e8bb
PB
9843 } else if ((op & 0xe) == 0xc) {
9844 /* Dual multiply accumulate long. */
62b44f05
AR
9845 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9846 tcg_temp_free_i32(tmp);
9847 tcg_temp_free_i32(tmp2);
9848 goto illegal_op;
9849 }
9ee6e8bb 9850 if (op & 1)
5e3f878a
PB
9851 gen_swap_half(tmp2);
9852 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9853 if (op & 0x10) {
5e3f878a 9854 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9855 } else {
5e3f878a 9856 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9857 }
7d1b0095 9858 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9859 /* BUGFIX */
9860 tmp64 = tcg_temp_new_i64();
9861 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9862 tcg_temp_free_i32(tmp);
a7812ae4
PB
9863 gen_addq(s, tmp64, rs, rd);
9864 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9865 tcg_temp_free_i64(tmp64);
2c0262af 9866 } else {
9ee6e8bb
PB
9867 if (op & 0x20) {
9868 /* Unsigned 64-bit multiply */
a7812ae4 9869 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9870 } else {
9ee6e8bb
PB
9871 if (op & 8) {
9872 /* smlalxy */
62b44f05
AR
9873 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9874 tcg_temp_free_i32(tmp2);
9875 tcg_temp_free_i32(tmp);
9876 goto illegal_op;
9877 }
5e3f878a 9878 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9879 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9880 tmp64 = tcg_temp_new_i64();
9881 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9882 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9883 } else {
9884 /* Signed 64-bit multiply */
a7812ae4 9885 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9886 }
b5ff1b31 9887 }
9ee6e8bb
PB
9888 if (op & 4) {
9889 /* umaal */
62b44f05
AR
9890 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9891 tcg_temp_free_i64(tmp64);
9892 goto illegal_op;
9893 }
a7812ae4
PB
9894 gen_addq_lo(s, tmp64, rs);
9895 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9896 } else if (op & 0x40) {
9897 /* 64-bit accumulate. */
a7812ae4 9898 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9899 }
a7812ae4 9900 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9901 tcg_temp_free_i64(tmp64);
5fd46862 9902 }
2c0262af 9903 break;
9ee6e8bb
PB
9904 }
9905 break;
9906 case 6: case 7: case 14: case 15:
9907 /* Coprocessor. */
9908 if (((insn >> 24) & 3) == 3) {
9909 /* Translate into the equivalent ARM encoding. */
f06053e3 9910 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 9911 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9912 goto illegal_op;
7dcc1f89 9913 }
6a57f3eb 9914 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 9915 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9916 goto illegal_op;
9917 }
9ee6e8bb
PB
9918 } else {
9919 if (insn & (1 << 28))
9920 goto illegal_op;
7dcc1f89 9921 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 9922 goto illegal_op;
7dcc1f89 9923 }
9ee6e8bb
PB
9924 }
9925 break;
9926 case 8: case 9: case 10: case 11:
9927 if (insn & (1 << 15)) {
9928 /* Branches, misc control. */
9929 if (insn & 0x5000) {
9930 /* Unconditional branch. */
9931 /* signextend(hw1[10:0]) -> offset[:12]. */
9932 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9933 /* hw1[10:0] -> offset[11:1]. */
9934 offset |= (insn & 0x7ff) << 1;
9935 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9936 offset[24:22] already have the same value because of the
9937 sign extension above. */
9938 offset ^= ((~insn) & (1 << 13)) << 10;
9939 offset ^= ((~insn) & (1 << 11)) << 11;
9940
9ee6e8bb
PB
9941 if (insn & (1 << 14)) {
9942 /* Branch and link. */
3174f8e9 9943 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9944 }
3b46e624 9945
b0109805 9946 offset += s->pc;
9ee6e8bb
PB
9947 if (insn & (1 << 12)) {
9948 /* b/bl */
b0109805 9949 gen_jmp(s, offset);
9ee6e8bb
PB
9950 } else {
9951 /* blx */
b0109805 9952 offset &= ~(uint32_t)2;
be5e7a76 9953 /* thumb2 bx, no need to check */
b0109805 9954 gen_bx_im(s, offset);
2c0262af 9955 }
9ee6e8bb
PB
9956 } else if (((insn >> 23) & 7) == 7) {
9957 /* Misc control */
9958 if (insn & (1 << 13))
9959 goto illegal_op;
9960
9961 if (insn & (1 << 26)) {
37e6456e
PM
9962 if (!(insn & (1 << 20))) {
9963 /* Hypervisor call (v7) */
9964 int imm16 = extract32(insn, 16, 4) << 12
9965 | extract32(insn, 0, 12);
9966 ARCH(7);
9967 if (IS_USER(s)) {
9968 goto illegal_op;
9969 }
9970 gen_hvc(s, imm16);
9971 } else {
9972 /* Secure monitor call (v6+) */
9973 ARCH(6K);
9974 if (IS_USER(s)) {
9975 goto illegal_op;
9976 }
9977 gen_smc(s);
9978 }
2c0262af 9979 } else {
9ee6e8bb
PB
9980 op = (insn >> 20) & 7;
9981 switch (op) {
9982 case 0: /* msr cpsr. */
b53d8923 9983 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9984 tmp = load_reg(s, rn);
9985 addr = tcg_const_i32(insn & 0xff);
9986 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9987 tcg_temp_free_i32(addr);
7d1b0095 9988 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9989 gen_lookup_tb(s);
9990 break;
9991 }
9992 /* fall through */
9993 case 1: /* msr spsr. */
b53d8923 9994 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9995 goto illegal_op;
b53d8923 9996 }
2fbac54b
FN
9997 tmp = load_reg(s, rn);
9998 if (gen_set_psr(s,
7dcc1f89 9999 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10000 op == 1, tmp))
9ee6e8bb
PB
10001 goto illegal_op;
10002 break;
10003 case 2: /* cps, nop-hint. */
10004 if (((insn >> 8) & 7) == 0) {
10005 gen_nop_hint(s, insn & 0xff);
10006 }
10007 /* Implemented as NOP in user mode. */
10008 if (IS_USER(s))
10009 break;
10010 offset = 0;
10011 imm = 0;
10012 if (insn & (1 << 10)) {
10013 if (insn & (1 << 7))
10014 offset |= CPSR_A;
10015 if (insn & (1 << 6))
10016 offset |= CPSR_I;
10017 if (insn & (1 << 5))
10018 offset |= CPSR_F;
10019 if (insn & (1 << 9))
10020 imm = CPSR_A | CPSR_I | CPSR_F;
10021 }
10022 if (insn & (1 << 8)) {
10023 offset |= 0x1f;
10024 imm |= (insn & 0x1f);
10025 }
10026 if (offset) {
2fbac54b 10027 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10028 }
10029 break;
10030 case 3: /* Special control operations. */
426f5abc 10031 ARCH(7);
9ee6e8bb
PB
10032 op = (insn >> 4) & 0xf;
10033 switch (op) {
10034 case 2: /* clrex */
426f5abc 10035 gen_clrex(s);
9ee6e8bb
PB
10036 break;
10037 case 4: /* dsb */
10038 case 5: /* dmb */
9ee6e8bb 10039 /* These execute as NOPs. */
9ee6e8bb 10040 break;
6df99dec
SS
10041 case 6: /* isb */
10042 /* We need to break the TB after this insn
10043 * to execute self-modifying code correctly
10044 * and also to take any pending interrupts
10045 * immediately.
10046 */
10047 gen_lookup_tb(s);
10048 break;
9ee6e8bb
PB
10049 default:
10050 goto illegal_op;
10051 }
10052 break;
10053 case 4: /* bxj */
10054 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10055 tmp = load_reg(s, rn);
10056 gen_bx(s, tmp);
9ee6e8bb
PB
10057 break;
10058 case 5: /* Exception return. */
b8b45b68
RV
10059 if (IS_USER(s)) {
10060 goto illegal_op;
10061 }
10062 if (rn != 14 || rd != 15) {
10063 goto illegal_op;
10064 }
10065 tmp = load_reg(s, rn);
10066 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10067 gen_exception_return(s, tmp);
10068 break;
9ee6e8bb 10069 case 6: /* mrs cpsr. */
7d1b0095 10070 tmp = tcg_temp_new_i32();
b53d8923 10071 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10072 addr = tcg_const_i32(insn & 0xff);
10073 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10074 tcg_temp_free_i32(addr);
9ee6e8bb 10075 } else {
9ef39277 10076 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10077 }
8984bd2e 10078 store_reg(s, rd, tmp);
9ee6e8bb
PB
10079 break;
10080 case 7: /* mrs spsr. */
10081 /* Not accessible in user mode. */
b53d8923 10082 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10083 goto illegal_op;
b53d8923 10084 }
d9ba4830
PB
10085 tmp = load_cpu_field(spsr);
10086 store_reg(s, rd, tmp);
9ee6e8bb 10087 break;
2c0262af
FB
10088 }
10089 }
9ee6e8bb
PB
10090 } else {
10091 /* Conditional branch. */
10092 op = (insn >> 22) & 0xf;
10093 /* Generate a conditional jump to next instruction. */
10094 s->condlabel = gen_new_label();
39fb730a 10095 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10096 s->condjmp = 1;
10097
10098 /* offset[11:1] = insn[10:0] */
10099 offset = (insn & 0x7ff) << 1;
10100 /* offset[17:12] = insn[21:16]. */
10101 offset |= (insn & 0x003f0000) >> 4;
10102 /* offset[31:20] = insn[26]. */
10103 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10104 /* offset[18] = insn[13]. */
10105 offset |= (insn & (1 << 13)) << 5;
10106 /* offset[19] = insn[11]. */
10107 offset |= (insn & (1 << 11)) << 8;
10108
10109 /* jump to the offset */
b0109805 10110 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10111 }
10112 } else {
10113 /* Data processing immediate. */
10114 if (insn & (1 << 25)) {
10115 if (insn & (1 << 24)) {
10116 if (insn & (1 << 20))
10117 goto illegal_op;
10118 /* Bitfield/Saturate. */
10119 op = (insn >> 21) & 7;
10120 imm = insn & 0x1f;
10121 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10122 if (rn == 15) {
7d1b0095 10123 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10124 tcg_gen_movi_i32(tmp, 0);
10125 } else {
10126 tmp = load_reg(s, rn);
10127 }
9ee6e8bb
PB
10128 switch (op) {
10129 case 2: /* Signed bitfield extract. */
10130 imm++;
10131 if (shift + imm > 32)
10132 goto illegal_op;
10133 if (imm < 32)
6ddbc6e4 10134 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10135 break;
10136 case 6: /* Unsigned bitfield extract. */
10137 imm++;
10138 if (shift + imm > 32)
10139 goto illegal_op;
10140 if (imm < 32)
6ddbc6e4 10141 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10142 break;
10143 case 3: /* Bitfield insert/clear. */
10144 if (imm < shift)
10145 goto illegal_op;
10146 imm = imm + 1 - shift;
10147 if (imm != 32) {
6ddbc6e4 10148 tmp2 = load_reg(s, rd);
d593c48e 10149 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10150 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10151 }
10152 break;
10153 case 7:
10154 goto illegal_op;
10155 default: /* Saturate. */
9ee6e8bb
PB
10156 if (shift) {
10157 if (op & 1)
6ddbc6e4 10158 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10159 else
6ddbc6e4 10160 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10161 }
6ddbc6e4 10162 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10163 if (op & 4) {
10164 /* Unsigned. */
62b44f05
AR
10165 if ((op & 1) && shift == 0) {
10166 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10167 tcg_temp_free_i32(tmp);
10168 tcg_temp_free_i32(tmp2);
10169 goto illegal_op;
10170 }
9ef39277 10171 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10172 } else {
9ef39277 10173 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10174 }
2c0262af 10175 } else {
9ee6e8bb 10176 /* Signed. */
62b44f05
AR
10177 if ((op & 1) && shift == 0) {
10178 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10179 tcg_temp_free_i32(tmp);
10180 tcg_temp_free_i32(tmp2);
10181 goto illegal_op;
10182 }
9ef39277 10183 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10184 } else {
9ef39277 10185 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10186 }
2c0262af 10187 }
b75263d6 10188 tcg_temp_free_i32(tmp2);
9ee6e8bb 10189 break;
2c0262af 10190 }
6ddbc6e4 10191 store_reg(s, rd, tmp);
9ee6e8bb
PB
10192 } else {
10193 imm = ((insn & 0x04000000) >> 15)
10194 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10195 if (insn & (1 << 22)) {
10196 /* 16-bit immediate. */
10197 imm |= (insn >> 4) & 0xf000;
10198 if (insn & (1 << 23)) {
10199 /* movt */
5e3f878a 10200 tmp = load_reg(s, rd);
86831435 10201 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10202 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10203 } else {
9ee6e8bb 10204 /* movw */
7d1b0095 10205 tmp = tcg_temp_new_i32();
5e3f878a 10206 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10207 }
10208 } else {
9ee6e8bb
PB
10209 /* Add/sub 12-bit immediate. */
10210 if (rn == 15) {
b0109805 10211 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10212 if (insn & (1 << 23))
b0109805 10213 offset -= imm;
9ee6e8bb 10214 else
b0109805 10215 offset += imm;
7d1b0095 10216 tmp = tcg_temp_new_i32();
5e3f878a 10217 tcg_gen_movi_i32(tmp, offset);
2c0262af 10218 } else {
5e3f878a 10219 tmp = load_reg(s, rn);
9ee6e8bb 10220 if (insn & (1 << 23))
5e3f878a 10221 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10222 else
5e3f878a 10223 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10224 }
9ee6e8bb 10225 }
5e3f878a 10226 store_reg(s, rd, tmp);
191abaa2 10227 }
9ee6e8bb
PB
10228 } else {
10229 int shifter_out = 0;
10230 /* modified 12-bit immediate. */
10231 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10232 imm = (insn & 0xff);
10233 switch (shift) {
10234 case 0: /* XY */
10235 /* Nothing to do. */
10236 break;
10237 case 1: /* 00XY00XY */
10238 imm |= imm << 16;
10239 break;
10240 case 2: /* XY00XY00 */
10241 imm |= imm << 16;
10242 imm <<= 8;
10243 break;
10244 case 3: /* XYXYXYXY */
10245 imm |= imm << 16;
10246 imm |= imm << 8;
10247 break;
10248 default: /* Rotated constant. */
10249 shift = (shift << 1) | (imm >> 7);
10250 imm |= 0x80;
10251 imm = imm << (32 - shift);
10252 shifter_out = 1;
10253 break;
b5ff1b31 10254 }
7d1b0095 10255 tmp2 = tcg_temp_new_i32();
3174f8e9 10256 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10257 rn = (insn >> 16) & 0xf;
3174f8e9 10258 if (rn == 15) {
7d1b0095 10259 tmp = tcg_temp_new_i32();
3174f8e9
FN
10260 tcg_gen_movi_i32(tmp, 0);
10261 } else {
10262 tmp = load_reg(s, rn);
10263 }
9ee6e8bb
PB
10264 op = (insn >> 21) & 0xf;
10265 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10266 shifter_out, tmp, tmp2))
9ee6e8bb 10267 goto illegal_op;
7d1b0095 10268 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10269 rd = (insn >> 8) & 0xf;
10270 if (rd != 15) {
3174f8e9
FN
10271 store_reg(s, rd, tmp);
10272 } else {
7d1b0095 10273 tcg_temp_free_i32(tmp);
2c0262af 10274 }
2c0262af 10275 }
9ee6e8bb
PB
10276 }
10277 break;
10278 case 12: /* Load/store single data item. */
10279 {
10280 int postinc = 0;
10281 int writeback = 0;
a99caa48 10282 int memidx;
9ee6e8bb 10283 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10284 if (disas_neon_ls_insn(s, insn)) {
c1713132 10285 goto illegal_op;
7dcc1f89 10286 }
9ee6e8bb
PB
10287 break;
10288 }
a2fdc890
PM
10289 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10290 if (rs == 15) {
10291 if (!(insn & (1 << 20))) {
10292 goto illegal_op;
10293 }
10294 if (op != 2) {
10295 /* Byte or halfword load space with dest == r15 : memory hints.
10296 * Catch them early so we don't emit pointless addressing code.
10297 * This space is a mix of:
10298 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10299 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10300 * cores)
10301 * unallocated hints, which must be treated as NOPs
10302 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10303 * which is easiest for the decoding logic
10304 * Some space which must UNDEF
10305 */
10306 int op1 = (insn >> 23) & 3;
10307 int op2 = (insn >> 6) & 0x3f;
10308 if (op & 2) {
10309 goto illegal_op;
10310 }
10311 if (rn == 15) {
02afbf64
PM
10312 /* UNPREDICTABLE, unallocated hint or
10313 * PLD/PLDW/PLI (literal)
10314 */
a2fdc890
PM
10315 return 0;
10316 }
10317 if (op1 & 1) {
02afbf64 10318 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10319 }
10320 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10321 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10322 }
10323 /* UNDEF space, or an UNPREDICTABLE */
10324 return 1;
10325 }
10326 }
a99caa48 10327 memidx = get_mem_index(s);
9ee6e8bb 10328 if (rn == 15) {
7d1b0095 10329 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10330 /* PC relative. */
10331 /* s->pc has already been incremented by 4. */
10332 imm = s->pc & 0xfffffffc;
10333 if (insn & (1 << 23))
10334 imm += insn & 0xfff;
10335 else
10336 imm -= insn & 0xfff;
b0109805 10337 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10338 } else {
b0109805 10339 addr = load_reg(s, rn);
9ee6e8bb
PB
10340 if (insn & (1 << 23)) {
10341 /* Positive offset. */
10342 imm = insn & 0xfff;
b0109805 10343 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10344 } else {
9ee6e8bb 10345 imm = insn & 0xff;
2a0308c5
PM
10346 switch ((insn >> 8) & 0xf) {
10347 case 0x0: /* Shifted Register. */
9ee6e8bb 10348 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10349 if (shift > 3) {
10350 tcg_temp_free_i32(addr);
18c9b560 10351 goto illegal_op;
2a0308c5 10352 }
b26eefb6 10353 tmp = load_reg(s, rm);
9ee6e8bb 10354 if (shift)
b26eefb6 10355 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10356 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10357 tcg_temp_free_i32(tmp);
9ee6e8bb 10358 break;
2a0308c5 10359 case 0xc: /* Negative offset. */
b0109805 10360 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10361 break;
2a0308c5 10362 case 0xe: /* User privilege. */
b0109805 10363 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10364 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10365 break;
2a0308c5 10366 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10367 imm = -imm;
10368 /* Fall through. */
2a0308c5 10369 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10370 postinc = 1;
10371 writeback = 1;
10372 break;
2a0308c5 10373 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10374 imm = -imm;
10375 /* Fall through. */
2a0308c5 10376 case 0xf: /* Pre-increment. */
b0109805 10377 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10378 writeback = 1;
10379 break;
10380 default:
2a0308c5 10381 tcg_temp_free_i32(addr);
b7bcbe95 10382 goto illegal_op;
9ee6e8bb
PB
10383 }
10384 }
10385 }
9ee6e8bb
PB
10386 if (insn & (1 << 20)) {
10387 /* Load. */
5a839c0d 10388 tmp = tcg_temp_new_i32();
a2fdc890 10389 switch (op) {
5a839c0d 10390 case 0:
a99caa48 10391 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10392 break;
10393 case 4:
a99caa48 10394 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10395 break;
10396 case 1:
a99caa48 10397 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10398 break;
10399 case 5:
a99caa48 10400 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10401 break;
10402 case 2:
a99caa48 10403 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10404 break;
2a0308c5 10405 default:
5a839c0d 10406 tcg_temp_free_i32(tmp);
2a0308c5
PM
10407 tcg_temp_free_i32(addr);
10408 goto illegal_op;
a2fdc890
PM
10409 }
10410 if (rs == 15) {
10411 gen_bx(s, tmp);
9ee6e8bb 10412 } else {
a2fdc890 10413 store_reg(s, rs, tmp);
9ee6e8bb
PB
10414 }
10415 } else {
10416 /* Store. */
b0109805 10417 tmp = load_reg(s, rs);
9ee6e8bb 10418 switch (op) {
5a839c0d 10419 case 0:
a99caa48 10420 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10421 break;
10422 case 1:
a99caa48 10423 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10424 break;
10425 case 2:
a99caa48 10426 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10427 break;
2a0308c5 10428 default:
5a839c0d 10429 tcg_temp_free_i32(tmp);
2a0308c5
PM
10430 tcg_temp_free_i32(addr);
10431 goto illegal_op;
b7bcbe95 10432 }
5a839c0d 10433 tcg_temp_free_i32(tmp);
2c0262af 10434 }
9ee6e8bb 10435 if (postinc)
b0109805
PB
10436 tcg_gen_addi_i32(addr, addr, imm);
10437 if (writeback) {
10438 store_reg(s, rn, addr);
10439 } else {
7d1b0095 10440 tcg_temp_free_i32(addr);
b0109805 10441 }
9ee6e8bb
PB
10442 }
10443 break;
10444 default:
10445 goto illegal_op;
2c0262af 10446 }
9ee6e8bb
PB
10447 return 0;
10448illegal_op:
10449 return 1;
2c0262af
FB
10450}
10451
/* Decode and translate one 16-bit Thumb instruction.
 *
 * On entry s->pc points at the instruction; it is advanced past the
 * halfword consumed here.  Encodings whose top bits mark a 32-bit
 * Thumb-2 instruction (cases 14/15 below) are handed off to
 * disas_thumb2_insn().
 *
 * If we are inside an IT block (s->condexec_mask != 0) the code
 * generated for this insn is made conditional: a branch to
 * s->condlabel is emitted for the failing condition and s->condjmp
 * is set so the caller closes the label.
 *
 * Undefined encodings fall through to the undef/undef32 labels,
 * which raise EXCP_UDEF with the appropriate syndrome/insn length.
 */
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        /* Shift (immediate), add, subtract. */
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            /* Inside an IT block the flag-setting forms are not used. */
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    /* blx: link register gets return address with Thumb bit. */
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;        /* rd == 16: flags only, no register writeback */
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            /* result is in tmp2; route the writeback below accordingly */
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                /* shift/rotate/mvn left the result in tmp2 */
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            /* comparison ops: discard both temporaries */
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            break;
        case 1: /* strh */
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            break;
        case 2: /* strb */
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, get_mem_index(s));
            break;
        case 4: /* ldr */
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, get_mem_index(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            /* offset = total size of the register list (+4 for lr/pc) */
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: pre-decrement SP by the list size */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: addr walked up over the list; rewind to new SP */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
                               default_exception_el(s));
            break;
        }

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    /* privileged-only; NOP in user mode */
                    break;
                }
                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    if (i == rn) {
                        /* defer: base is still needed for later elements */
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    /* undefined 32-bit (Thumb-2) encoding: 4-byte insn length */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
    return;
illegal_op:
undef:
    /* undefined 16-bit encoding: 2-byte insn length */
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
11181
541ebcd4
PM
11182static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11183{
11184 /* Return true if the insn at dc->pc might cross a page boundary.
11185 * (False positives are OK, false negatives are not.)
11186 */
11187 uint16_t insn;
11188
11189 if ((s->pc & 3) == 0) {
11190 /* At a 4-aligned address we can't be crossing a page */
11191 return false;
11192 }
11193
11194 /* This must be a Thumb insn */
11195 insn = arm_lduw_code(env, s->pc, s->bswap_code);
11196
11197 if ((insn >> 11) >= 0x1d) {
11198 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11199 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11200 * end up actually treating this as two 16-bit insns (see the
11201 * code at the start of disas_thumb2_insn()) but we don't bother
11202 * to check for that as it is unlikely, and false positives here
11203 * are harmless.
11204 */
11205 return true;
11206 }
11207 /* Definitely a 16-bit insn, can't be crossing a page. */
11208 return false;
11209}
11210
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Decode state for this TB comes entirely from the tb->flags hashed
     * into the TB lookup, never from mutable CPU state.
     */
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block.  So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        /* Record PC and IT-state for restore_state_to_opc() (see the
         * condexec note above).
         */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        /* CPU-internal breakpoint: let the helper decide
                         * at run time whether it actually fires.
                         */
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        /* GDB breakpoint: raise EXCP_DEBUG unconditionally. */
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask left and
             * fold the next "condition invert" bit into condexec_cond.
             */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        /* Close the "condition failed" branch target for a conditional
         * insn that did not itself end the TB.
         */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
11594
/* Printable names for the 16 CPSR.M mode field encodings, indexed by
 * (psr & 0xf); "???" marks encodings with no named mode.
 */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 11599
878096ee
AF
11600void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11601 int flags)
2c0262af 11602{
878096ee
AF
11603 ARMCPU *cpu = ARM_CPU(cs);
11604 CPUARMState *env = &cpu->env;
2c0262af 11605 int i;
b5ff1b31 11606 uint32_t psr;
06e5cf7a 11607 const char *ns_status;
2c0262af 11608
17731115
PM
11609 if (is_a64(env)) {
11610 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11611 return;
11612 }
11613
2c0262af 11614 for(i=0;i<16;i++) {
7fe48483 11615 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11616 if ((i % 4) == 3)
7fe48483 11617 cpu_fprintf(f, "\n");
2c0262af 11618 else
7fe48483 11619 cpu_fprintf(f, " ");
2c0262af 11620 }
b5ff1b31 11621 psr = cpsr_read(env);
06e5cf7a
PM
11622
11623 if (arm_feature(env, ARM_FEATURE_EL3) &&
11624 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
11625 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
11626 } else {
11627 ns_status = "";
11628 }
11629
11630 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 11631 psr,
b5ff1b31
FB
11632 psr & (1 << 31) ? 'N' : '-',
11633 psr & (1 << 30) ? 'Z' : '-',
11634 psr & (1 << 29) ? 'C' : '-',
11635 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11636 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 11637 ns_status,
b5ff1b31 11638 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11639
f2617cfc
PM
11640 if (flags & CPU_DUMP_FPU) {
11641 int numvfpregs = 0;
11642 if (arm_feature(env, ARM_FEATURE_VFP)) {
11643 numvfpregs += 16;
11644 }
11645 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11646 numvfpregs += 16;
11647 }
11648 for (i = 0; i < numvfpregs; i++) {
11649 uint64_t v = float64_val(env->vfp.regs[i]);
11650 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11651 i * 2, (uint32_t)v,
11652 i * 2 + 1, (uint32_t)(v >> 32),
11653 i, v);
11654 }
11655 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11656 }
2c0262af 11657}
a6b025d3 11658
bad729e2
RH
11659void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11660 target_ulong *data)
d2856f1a 11661{
3926cc84 11662 if (is_a64(env)) {
bad729e2 11663 env->pc = data[0];
40f860cd 11664 env->condexec_bits = 0;
3926cc84 11665 } else {
bad729e2
RH
11666 env->regs[15] = data[0];
11667 env->condexec_bits = data[1];
3926cc84 11668 }
d2856f1a 11669}
This page took 3.205457 seconds and 4 git commands to generate.