Git Repo - qemu.git/blame - target-arm/translate.c
target-arm: Move call to disas_vfp_insn out of disas_coproc_insn.
[qemu.git] / target-arm / translate.c
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
534df156 31#include "qemu/bitops.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 46#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 47
86753403 48#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 49
f570c61e 50#include "translate.h"
e12ce78d
PM
51static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
9ee6e8bb 59/* These instructions trap after executing, so defer them until after the
b90372ad 60 conditional execution state has been updated. */
9ee6e8bb
PB
61#define DISAS_WFI 4
62#define DISAS_SWI 5
2c0262af 63
3407ad0e 64TCGv_ptr cpu_env;
ad69471c 65/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 66static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 67static TCGv_i32 cpu_R[16];
66c374de 68static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_addr;
70static TCGv_i32 cpu_exclusive_val;
71static TCGv_i32 cpu_exclusive_high;
72#ifdef CONFIG_USER_ONLY
73static TCGv_i32 cpu_exclusive_test;
74static TCGv_i32 cpu_exclusive_info;
75#endif
ad69471c 76
b26eefb6 77/* FIXME: These should be removed. */
39d5492a 78static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 79static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 80
022c62cb 81#include "exec/gen-icount.h"
2e70f6ef 82
155c3eac
FN
83static const char *regnames[] =
84 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
85 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
86
b26eefb6
PB
87/* initialize TCG globals. */
88void arm_translate_init(void)
89{
155c3eac
FN
90 int i;
91
a7812ae4
PB
92 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93
155c3eac
FN
94 for (i = 0; i < 16; i++) {
95 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 96 offsetof(CPUARMState, regs[i]),
155c3eac
FN
97 regnames[i]);
98 }
66c374de
AJ
99 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
100 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
101 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
102 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
103
426f5abc 104 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 106 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 107 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 108 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 109 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
110#ifdef CONFIG_USER_ONLY
111 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 112 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 113 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 114 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 115#endif
155c3eac 116
14ade10f 117 a64_translate_init();
b26eefb6
PB
118}
119
39d5492a 120static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 121{
39d5492a 122 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
123 tcg_gen_ld_i32(tmp, cpu_env, offset);
124 return tmp;
125}
126
0ecb72a5 127#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 128
39d5492a 129static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
130{
131 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 132 tcg_temp_free_i32(var);
d9ba4830
PB
133}
134
135#define store_cpu_field(var, name) \
0ecb72a5 136 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 137
b26eefb6 138/* Set a variable to the value of a CPU register. */
39d5492a 139static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
140{
141 if (reg == 15) {
142 uint32_t addr;
b90372ad 143 /* normally, since s->pc has already been advanced past this insn, we need only add one more insn's length */
b26eefb6
PB
144 if (s->thumb)
145 addr = (long)s->pc + 2;
146 else
147 addr = (long)s->pc + 4;
148 tcg_gen_movi_i32(var, addr);
149 } else {
155c3eac 150 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
151 }
152}
153
154/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 155static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 156{
39d5492a 157 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
158 load_reg_var(s, tmp, reg);
159 return tmp;
160}
161
162/* Set a CPU register. The source must be a temporary and will be
163 marked as dead. */
39d5492a 164static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
165{
166 if (reg == 15) {
167 tcg_gen_andi_i32(var, var, ~1);
168 s->is_jmp = DISAS_JUMP;
169 }
155c3eac 170 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 171 tcg_temp_free_i32(var);
b26eefb6
PB
172}
173
b26eefb6 174/* Value extensions. */
86831435
PB
175#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
176#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
177#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
178#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
179
1497c961
PB
180#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
181#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 182
b26eefb6 183
39d5492a 184static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 185{
39d5492a 186 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 187 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
188 tcg_temp_free_i32(tmp_mask);
189}
d9ba4830
PB
190/* Set NZCV flags from the high 4 bits of var. */
191#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
192
193static void gen_exception(int excp)
194{
39d5492a 195 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 196 tcg_gen_movi_i32(tmp, excp);
1ce94f81 197 gen_helper_exception(cpu_env, tmp);
7d1b0095 198 tcg_temp_free_i32(tmp);
d9ba4830
PB
199}
200
39d5492a 201static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 202{
39d5492a
PM
203 TCGv_i32 tmp1 = tcg_temp_new_i32();
204 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
205 tcg_gen_ext16s_i32(tmp1, a);
206 tcg_gen_ext16s_i32(tmp2, b);
3670669c 207 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 208 tcg_temp_free_i32(tmp2);
3670669c
PB
209 tcg_gen_sari_i32(a, a, 16);
210 tcg_gen_sari_i32(b, b, 16);
211 tcg_gen_mul_i32(b, b, a);
212 tcg_gen_mov_i32(a, tmp1);
7d1b0095 213 tcg_temp_free_i32(tmp1);
3670669c
PB
214}
215
216/* Byteswap each halfword. */
39d5492a 217static void gen_rev16(TCGv_i32 var)
3670669c 218{
39d5492a 219 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
220 tcg_gen_shri_i32(tmp, var, 8);
221 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
222 tcg_gen_shli_i32(var, var, 8);
223 tcg_gen_andi_i32(var, var, 0xff00ff00);
224 tcg_gen_or_i32(var, var, tmp);
7d1b0095 225 tcg_temp_free_i32(tmp);
3670669c
PB
226}
227
228/* Byteswap low halfword and sign extend. */
39d5492a 229static void gen_revsh(TCGv_i32 var)
3670669c 230{
1a855029
AJ
231 tcg_gen_ext16u_i32(var, var);
232 tcg_gen_bswap16_i32(var, var);
233 tcg_gen_ext16s_i32(var, var);
3670669c
PB
234}
235
236/* Unsigned bitfield extract. */
39d5492a 237static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
238{
239 if (shift)
240 tcg_gen_shri_i32(var, var, shift);
241 tcg_gen_andi_i32(var, var, mask);
242}
243
244/* Signed bitfield extract. */
39d5492a 245static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
246{
247 uint32_t signbit;
248
249 if (shift)
250 tcg_gen_sari_i32(var, var, shift);
251 if (shift + width < 32) {
252 signbit = 1u << (width - 1);
253 tcg_gen_andi_i32(var, var, (1u << width) - 1);
254 tcg_gen_xori_i32(var, var, signbit);
255 tcg_gen_subi_i32(var, var, signbit);
256 }
257}
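/* Illustrative worked example (not part of the original source): the xor/sub
 * pair above sign-extends the extracted field.  With width = 8 the sign bit
 * is 0x80; extracting the byte 0xf0 (-16 as a signed 8-bit value):
 *
 *   0xf0 ^ 0x80 = 0x70
 *   0x70 - 0x80 = 0xfffffff0          (-16 as a signed 32-bit value)
 *
 * A field whose sign bit is clear (e.g. 0x70) is left unchanged by the same
 * two steps. */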
258
838fa72d 259/* Return (b << 32) + a. Mark inputs as dead. */
39d5492a 260static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 261{
838fa72d
AJ
262 TCGv_i64 tmp64 = tcg_temp_new_i64();
263
264 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 265 tcg_temp_free_i32(b);
838fa72d
AJ
266 tcg_gen_shli_i64(tmp64, tmp64, 32);
267 tcg_gen_add_i64(a, tmp64, a);
268
269 tcg_temp_free_i64(tmp64);
270 return a;
271}
272
273/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 274static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
275{
276 TCGv_i64 tmp64 = tcg_temp_new_i64();
277
278 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 279 tcg_temp_free_i32(b);
838fa72d
AJ
280 tcg_gen_shli_i64(tmp64, tmp64, 32);
281 tcg_gen_sub_i64(a, tmp64, a);
282
283 tcg_temp_free_i64(tmp64);
284 return a;
3670669c
PB
285}
286
5e3f878a 287/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 288static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 289{
39d5492a
PM
290 TCGv_i32 lo = tcg_temp_new_i32();
291 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 292 TCGv_i64 ret;
5e3f878a 293
831d7fe8 294 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 295 tcg_temp_free_i32(a);
7d1b0095 296 tcg_temp_free_i32(b);
831d7fe8
RH
297
298 ret = tcg_temp_new_i64();
299 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
300 tcg_temp_free_i32(lo);
301 tcg_temp_free_i32(hi);
831d7fe8
RH
302
303 return ret;
5e3f878a
PB
304}
305
39d5492a 306static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 307{
39d5492a
PM
308 TCGv_i32 lo = tcg_temp_new_i32();
309 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 310 TCGv_i64 ret;
5e3f878a 311
831d7fe8 312 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 313 tcg_temp_free_i32(a);
7d1b0095 314 tcg_temp_free_i32(b);
831d7fe8
RH
315
316 ret = tcg_temp_new_i64();
317 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
318 tcg_temp_free_i32(lo);
319 tcg_temp_free_i32(hi);
831d7fe8
RH
320
321 return ret;
5e3f878a
PB
322}
323
8f01245e 324/* Swap low and high halfwords. */
39d5492a 325static void gen_swap_half(TCGv_i32 var)
8f01245e 326{
39d5492a 327 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
328 tcg_gen_shri_i32(tmp, var, 16);
329 tcg_gen_shli_i32(var, var, 16);
330 tcg_gen_or_i32(var, var, tmp);
7d1b0095 331 tcg_temp_free_i32(tmp);
8f01245e
PB
332}
333
b26eefb6
PB
334/* Dual 16-bit add. The result is placed in t0, and t1 is marked as dead.
335 tmp = (t0 ^ t1) & 0x8000;
336 t0 &= ~0x8000;
337 t1 &= ~0x8000;
338 t0 = (t0 + t1) ^ tmp;
339 */
340
39d5492a 341static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 342{
39d5492a 343 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
344 tcg_gen_xor_i32(tmp, t0, t1);
345 tcg_gen_andi_i32(tmp, tmp, 0x8000);
346 tcg_gen_andi_i32(t0, t0, ~0x8000);
347 tcg_gen_andi_i32(t1, t1, ~0x8000);
348 tcg_gen_add_i32(t0, t0, t1);
349 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
350 tcg_temp_free_i32(tmp);
351 tcg_temp_free_i32(t1);
b26eefb6
PB
352}
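/* Illustrative worked example (not part of the original source): why the
 * bit-15 masking in gen_add16() is needed.  Take t0 = t1 = 0x00018001,
 * i.e. each register holds the halfword pair {0x0001, 0x8001}:
 *
 *   tmp = (t0 ^ t1) & 0x8000          = 0x00000000
 *   t0 &= ~0x8000; t1 &= ~0x8000      -> both become 0x00010001
 *   t0 += t1                          = 0x00020002  (no carry into bit 16)
 *   t0 ^= tmp                         = 0x00020002
 *
 * which is the correct per-halfword result {0x0002, 0x0002}.  A plain 32-bit
 * add of the original values would give 0x00030002, letting the carry out of
 * the low halfword corrupt the high one. */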
353
354/* Set CF to the top bit of var. */
39d5492a 355static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 356{
66c374de 357 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
358}
359
360/* Set N and Z flags from var. */
39d5492a 361static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 362{
66c374de
AJ
363 tcg_gen_mov_i32(cpu_NF, var);
364 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
365}
366
367/* T0 += T1 + CF. */
39d5492a 368static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 369{
396e467c 370 tcg_gen_add_i32(t0, t0, t1);
66c374de 371 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
372}
373
e9bb4aa9 374/* dest = T0 + T1 + CF. */
39d5492a 375static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 376{
e9bb4aa9 377 tcg_gen_add_i32(dest, t0, t1);
66c374de 378 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
379}
380
3670669c 381/* dest = T0 - T1 + CF - 1. */
39d5492a 382static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 383{
3670669c 384 tcg_gen_sub_i32(dest, t0, t1);
66c374de 385 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 386 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
387}
388
72485ec4 389/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 390static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 391{
39d5492a 392 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
393 tcg_gen_movi_i32(tmp, 0);
394 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 395 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 396 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
397 tcg_gen_xor_i32(tmp, t0, t1);
398 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
399 tcg_temp_free_i32(tmp);
400 tcg_gen_mov_i32(dest, cpu_NF);
401}
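/* Illustrative note (not part of the original source): the V flag above is
 * computed as VF = (result ^ t0) & ~(t0 ^ t1), with the flag living in
 * bit 31 (cpu_NF holds the result).  Signed overflow happens exactly when
 * both operands have the same sign but the result does not, e.g.
 * t0 = t1 = 0x40000000:
 *
 *   result        = 0x80000000
 *   result ^ t0   = 0xc0000000
 *   ~(t0 ^ t1)    = 0xffffffff
 *   VF            = 0xc0000000     -> bit 31 set, overflow
 */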
402
49b4c31e 403/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 404static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 405{
39d5492a 406 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
407 if (TCG_TARGET_HAS_add2_i32) {
408 tcg_gen_movi_i32(tmp, 0);
409 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 410 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
411 } else {
412 TCGv_i64 q0 = tcg_temp_new_i64();
413 TCGv_i64 q1 = tcg_temp_new_i64();
414 tcg_gen_extu_i32_i64(q0, t0);
415 tcg_gen_extu_i32_i64(q1, t1);
416 tcg_gen_add_i64(q0, q0, q1);
417 tcg_gen_extu_i32_i64(q1, cpu_CF);
418 tcg_gen_add_i64(q0, q0, q1);
419 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
420 tcg_temp_free_i64(q0);
421 tcg_temp_free_i64(q1);
422 }
423 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
424 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
425 tcg_gen_xor_i32(tmp, t0, t1);
426 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
427 tcg_temp_free_i32(tmp);
428 tcg_gen_mov_i32(dest, cpu_NF);
429}
430
72485ec4 431/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 432static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 433{
39d5492a 434 TCGv_i32 tmp;
72485ec4
AJ
435 tcg_gen_sub_i32(cpu_NF, t0, t1);
436 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
437 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
438 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
439 tmp = tcg_temp_new_i32();
440 tcg_gen_xor_i32(tmp, t0, t1);
441 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
442 tcg_temp_free_i32(tmp);
443 tcg_gen_mov_i32(dest, cpu_NF);
444}
445
e77f0832 446/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 447static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 448{
39d5492a 449 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
450 tcg_gen_not_i32(tmp, t1);
451 gen_adc_CC(dest, t0, tmp);
39d5492a 452 tcg_temp_free_i32(tmp);
2de68a49
RH
453}
454
365af80e 455#define GEN_SHIFT(name) \
39d5492a 456static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 457{ \
39d5492a 458 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
459 tmp1 = tcg_temp_new_i32(); \
460 tcg_gen_andi_i32(tmp1, t1, 0xff); \
461 tmp2 = tcg_const_i32(0); \
462 tmp3 = tcg_const_i32(0x1f); \
463 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
464 tcg_temp_free_i32(tmp3); \
465 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
466 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
467 tcg_temp_free_i32(tmp2); \
468 tcg_temp_free_i32(tmp1); \
469}
470GEN_SHIFT(shl)
471GEN_SHIFT(shr)
472#undef GEN_SHIFT
473
39d5492a 474static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 475{
39d5492a 476 TCGv_i32 tmp1, tmp2;
365af80e
AJ
477 tmp1 = tcg_temp_new_i32();
478 tcg_gen_andi_i32(tmp1, t1, 0xff);
479 tmp2 = tcg_const_i32(0x1f);
480 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
481 tcg_temp_free_i32(tmp2);
482 tcg_gen_sar_i32(dest, t0, tmp1);
483 tcg_temp_free_i32(tmp1);
484}
485
39d5492a 486static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 487{
39d5492a
PM
488 TCGv_i32 c0 = tcg_const_i32(0);
489 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
490 tcg_gen_neg_i32(tmp, src);
491 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
492 tcg_temp_free_i32(c0);
493 tcg_temp_free_i32(tmp);
494}
ad69471c 495
39d5492a 496static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 497{
9a119ff6 498 if (shift == 0) {
66c374de 499 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 500 } else {
66c374de
AJ
501 tcg_gen_shri_i32(cpu_CF, var, shift);
502 if (shift != 31) {
503 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
504 }
9a119ff6 505 }
9a119ff6 506}
b26eefb6 507
9a119ff6 508/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
509static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
510 int shift, int flags)
9a119ff6
PB
511{
512 switch (shiftop) {
513 case 0: /* LSL */
514 if (shift != 0) {
515 if (flags)
516 shifter_out_im(var, 32 - shift);
517 tcg_gen_shli_i32(var, var, shift);
518 }
519 break;
520 case 1: /* LSR */
521 if (shift == 0) {
522 if (flags) {
66c374de 523 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
524 }
525 tcg_gen_movi_i32(var, 0);
526 } else {
527 if (flags)
528 shifter_out_im(var, shift - 1);
529 tcg_gen_shri_i32(var, var, shift);
530 }
531 break;
532 case 2: /* ASR */
533 if (shift == 0)
534 shift = 32;
535 if (flags)
536 shifter_out_im(var, shift - 1);
537 if (shift == 32)
538 shift = 31;
539 tcg_gen_sari_i32(var, var, shift);
540 break;
541 case 3: /* ROR/RRX */
542 if (shift != 0) {
543 if (flags)
544 shifter_out_im(var, shift - 1);
f669df27 545 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 546 } else {
39d5492a 547 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 548 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
549 if (flags)
550 shifter_out_im(var, 0);
551 tcg_gen_shri_i32(var, var, 1);
b26eefb6 552 tcg_gen_or_i32(var, var, tmp);
7d1b0095 553 tcg_temp_free_i32(tmp);
b26eefb6
PB
554 }
555 }
556};
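/* Illustrative note (not part of the original source): the shift == 0 cases
 * above follow the ARM immediate-shift encodings: LSR #0 and ASR #0 encode a
 * shift by 32, and ROR #0 encodes RRX (rotate right by one through carry).
 * Worked RRX example with var = 0x00000003 and C = 1:
 *
 *   new C  = old bit 0 of var           = 1
 *   result = (old C << 31) | (var >> 1) = 0x80000001
 */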
557
39d5492a
PM
558static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
559 TCGv_i32 shift, int flags)
8984bd2e
PB
560{
561 if (flags) {
562 switch (shiftop) {
9ef39277
BS
563 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
564 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
565 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
566 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
567 }
568 } else {
569 switch (shiftop) {
365af80e
AJ
570 case 0:
571 gen_shl(var, var, shift);
572 break;
573 case 1:
574 gen_shr(var, var, shift);
575 break;
576 case 2:
577 gen_sar(var, var, shift);
578 break;
f669df27
AJ
579 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
580 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
581 }
582 }
7d1b0095 583 tcg_temp_free_i32(shift);
8984bd2e
PB
584}
585
6ddbc6e4
PB
586#define PAS_OP(pfx) \
587 switch (op2) { \
588 case 0: gen_pas_helper(glue(pfx,add16)); break; \
589 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
590 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
591 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
592 case 4: gen_pas_helper(glue(pfx,add8)); break; \
593 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
594 }
39d5492a 595static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 596{
a7812ae4 597 TCGv_ptr tmp;
6ddbc6e4
PB
598
599 switch (op1) {
600#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
601 case 1:
a7812ae4 602 tmp = tcg_temp_new_ptr();
0ecb72a5 603 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 604 PAS_OP(s)
b75263d6 605 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
606 break;
607 case 5:
a7812ae4 608 tmp = tcg_temp_new_ptr();
0ecb72a5 609 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 610 PAS_OP(u)
b75263d6 611 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
612 break;
613#undef gen_pas_helper
614#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
615 case 2:
616 PAS_OP(q);
617 break;
618 case 3:
619 PAS_OP(sh);
620 break;
621 case 6:
622 PAS_OP(uq);
623 break;
624 case 7:
625 PAS_OP(uh);
626 break;
627#undef gen_pas_helper
628 }
629}
9ee6e8bb
PB
630#undef PAS_OP
631
6ddbc6e4
PB
632/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
633#define PAS_OP(pfx) \
ed89a2f1 634 switch (op1) { \
6ddbc6e4
PB
635 case 0: gen_pas_helper(glue(pfx,add8)); break; \
636 case 1: gen_pas_helper(glue(pfx,add16)); break; \
637 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
638 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
639 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
640 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
641 }
39d5492a 642static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 643{
a7812ae4 644 TCGv_ptr tmp;
6ddbc6e4 645
ed89a2f1 646 switch (op2) {
6ddbc6e4
PB
647#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
648 case 0:
a7812ae4 649 tmp = tcg_temp_new_ptr();
0ecb72a5 650 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 651 PAS_OP(s)
b75263d6 652 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
653 break;
654 case 4:
a7812ae4 655 tmp = tcg_temp_new_ptr();
0ecb72a5 656 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 657 PAS_OP(u)
b75263d6 658 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
659 break;
660#undef gen_pas_helper
661#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
662 case 1:
663 PAS_OP(q);
664 break;
665 case 2:
666 PAS_OP(sh);
667 break;
668 case 5:
669 PAS_OP(uq);
670 break;
671 case 6:
672 PAS_OP(uh);
673 break;
674#undef gen_pas_helper
675 }
676}
9ee6e8bb
PB
677#undef PAS_OP
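/* Illustrative note (not part of the original source): the two parallel
 * add/sub decoders above differ only in how the fields are used.  In the ARM
 * version op1 selects the signed/unsigned/saturating prefix (1 = s, 2 = q,
 * 3 = sh, 5 = u, 6 = uq, 7 = uh) and op2 selects the operation; in the
 * Thumb-2 version those roles are swapped (op2 selects the prefix, op1 the
 * operation) and the selector values themselves also differ, which is what
 * the "arbitrarily different encodings" comment refers to. */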
678
d9ba4830
PB
679static void gen_test_cc(int cc, int label)
680{
39d5492a 681 TCGv_i32 tmp;
d9ba4830
PB
682 int inv;
683
d9ba4830
PB
684 switch (cc) {
685 case 0: /* eq: Z */
66c374de 686 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
687 break;
688 case 1: /* ne: !Z */
66c374de 689 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
690 break;
691 case 2: /* cs: C */
66c374de 692 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
693 break;
694 case 3: /* cc: !C */
66c374de 695 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
696 break;
697 case 4: /* mi: N */
66c374de 698 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
699 break;
700 case 5: /* pl: !N */
66c374de 701 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
702 break;
703 case 6: /* vs: V */
66c374de 704 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
705 break;
706 case 7: /* vc: !V */
66c374de 707 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
708 break;
709 case 8: /* hi: C && !Z */
710 inv = gen_new_label();
66c374de
AJ
711 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
713 gen_set_label(inv);
714 break;
715 case 9: /* ls: !C || Z */
66c374de
AJ
716 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
718 break;
719 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
720 tmp = tcg_temp_new_i32();
721 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 722 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 723 tcg_temp_free_i32(tmp);
d9ba4830
PB
724 break;
725 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
726 tmp = tcg_temp_new_i32();
727 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 728 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 729 tcg_temp_free_i32(tmp);
d9ba4830
PB
730 break;
731 case 12: /* gt: !Z && N == V */
732 inv = gen_new_label();
66c374de
AJ
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 737 tcg_temp_free_i32(tmp);
d9ba4830
PB
738 gen_set_label(inv);
739 break;
740 case 13: /* le: Z || N != V */
66c374de
AJ
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 break;
747 default:
748 fprintf(stderr, "Bad condition code 0x%x\n", cc);
749 abort();
750 }
d9ba4830 751}
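/* Illustrative note (not part of the original source): the flag variables
 * used above store values, not single bits -- cpu_ZF holds a result that is
 * zero exactly when Z is set, cpu_NF and cpu_VF keep their flag in bit 31,
 * and cpu_CF holds 0 or 1.  So for cc == 8 (HI, "C set and Z clear") the
 * code first skips to the local label 'inv' when cpu_CF == 0, and otherwise
 * branches to the caller's label when cpu_ZF != 0 (i.e. Z clear); the jump
 * to 'label' is taken only when both tests pass. */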
2c0262af 752
b1d8e52e 753static const uint8_t table_logic_cc[16] = {
2c0262af
FB
754 1, /* and */
755 1, /* xor */
756 0, /* sub */
757 0, /* rsb */
758 0, /* add */
759 0, /* adc */
760 0, /* sbc */
761 0, /* rsc */
762 1, /* andl */
763 1, /* xorl */
764 0, /* cmp */
765 0, /* cmn */
766 1, /* orr */
767 1, /* mov */
768 1, /* bic */
769 1, /* mvn */
770};
3b46e624 771
d9ba4830
PB
772/* Set PC and Thumb state from an immediate address. */
773static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 774{
39d5492a 775 TCGv_i32 tmp;
99c475ab 776
b26eefb6 777 s->is_jmp = DISAS_UPDATE;
d9ba4830 778 if (s->thumb != (addr & 1)) {
7d1b0095 779 tmp = tcg_temp_new_i32();
d9ba4830 780 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 781 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 782 tcg_temp_free_i32(tmp);
d9ba4830 783 }
155c3eac 784 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
785}
786
787/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 788static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 789{
d9ba4830 790 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
791 tcg_gen_andi_i32(cpu_R[15], var, ~1);
792 tcg_gen_andi_i32(var, var, 1);
793 store_cpu_field(var, thumb);
d9ba4830
PB
794}
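/* Illustrative worked example (not part of the original source): both
 * gen_bx_im() and gen_bx() implement the interworking rule that bit 0 of the
 * branch target selects the instruction set.  Branching to 0x00008001 from
 * ARM state sets the thumb flag to 1 and loads PC with 0x00008000; branching
 * to 0x00008000 leaves the flag clear and PC at 0x00008000. */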
795
21aeb343
JR
796/* Variant of store_reg which uses branch&exchange logic when storing
797 to r15 in ARM architecture v7 and above. The source must be a temporary
798 and will be marked as dead. */
0ecb72a5 799static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 800 int reg, TCGv_i32 var)
21aeb343
JR
801{
802 if (reg == 15 && ENABLE_ARCH_7) {
803 gen_bx(s, var);
804 } else {
805 store_reg(s, reg, var);
806 }
807}
808
be5e7a76
DES
809/* Variant of store_reg which uses branch&exchange logic when storing
810 * to r15 in ARM architecture v5T and above. This is used for storing
811 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
812 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 813static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 814 int reg, TCGv_i32 var)
be5e7a76
DES
815{
816 if (reg == 15 && ENABLE_ARCH_5) {
817 gen_bx(s, var);
818 } else {
819 store_reg(s, reg, var);
820 }
821}
822
08307563
PM
823/* Abstractions of "generate code to do a guest load/store for
824 * AArch32", where a vaddr is always 32 bits (and is zero
825 * extended if we're a 64 bit core) and data is also
826 * 32 bits unless specifically doing a 64 bit access.
827 * These functions work like tcg_gen_qemu_{ld,st}* except
828 * that their arguments are TCGv_i32 rather than TCGv.
829 */
830#if TARGET_LONG_BITS == 32
831
832#define DO_GEN_LD(OP) \
833static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
834{ \
835 tcg_gen_qemu_##OP(val, addr, index); \
836}
837
838#define DO_GEN_ST(OP) \
839static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
840{ \
841 tcg_gen_qemu_##OP(val, addr, index); \
842}
843
844static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
845{
846 tcg_gen_qemu_ld64(val, addr, index);
847}
848
849static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
850{
851 tcg_gen_qemu_st64(val, addr, index);
852}
853
854#else
855
856#define DO_GEN_LD(OP) \
857static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
858{ \
859 TCGv addr64 = tcg_temp_new(); \
860 TCGv val64 = tcg_temp_new(); \
861 tcg_gen_extu_i32_i64(addr64, addr); \
862 tcg_gen_qemu_##OP(val64, addr64, index); \
863 tcg_temp_free(addr64); \
864 tcg_gen_trunc_i64_i32(val, val64); \
865 tcg_temp_free(val64); \
866}
867
868#define DO_GEN_ST(OP) \
869static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
870{ \
871 TCGv addr64 = tcg_temp_new(); \
872 TCGv val64 = tcg_temp_new(); \
873 tcg_gen_extu_i32_i64(addr64, addr); \
874 tcg_gen_extu_i32_i64(val64, val); \
875 tcg_gen_qemu_##OP(val64, addr64, index); \
876 tcg_temp_free(addr64); \
877 tcg_temp_free(val64); \
878}
879
880static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
881{
882 TCGv addr64 = tcg_temp_new();
883 tcg_gen_extu_i32_i64(addr64, addr);
884 tcg_gen_qemu_ld64(val, addr64, index);
885 tcg_temp_free(addr64);
886}
887
888static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
889{
890 TCGv addr64 = tcg_temp_new();
891 tcg_gen_extu_i32_i64(addr64, addr);
892 tcg_gen_qemu_st64(val, addr64, index);
893 tcg_temp_free(addr64);
894}
895
896#endif
897
898DO_GEN_LD(ld8s)
899DO_GEN_LD(ld8u)
900DO_GEN_LD(ld16s)
901DO_GEN_LD(ld16u)
902DO_GEN_LD(ld32u)
903DO_GEN_ST(st8)
904DO_GEN_ST(st16)
905DO_GEN_ST(st32)
906
eaed129d 907static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 908{
14ade10f
AG
909 if (s->aarch64) {
910 gen_a64_set_pc_im(val);
911 } else {
912 tcg_gen_movi_i32(cpu_R[15], val);
913 }
5e3f878a
PB
914}
915
b5ff1b31
FB
916/* Force a TB lookup after an instruction that changes the CPU state. */
917static inline void gen_lookup_tb(DisasContext *s)
918{
a6445c52 919 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
920 s->is_jmp = DISAS_UPDATE;
921}
922
b0109805 923static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 924 TCGv_i32 var)
2c0262af 925{
1e8d4eec 926 int val, rm, shift, shiftop;
39d5492a 927 TCGv_i32 offset;
2c0262af
FB
928
929 if (!(insn & (1 << 25))) {
930 /* immediate */
931 val = insn & 0xfff;
932 if (!(insn & (1 << 23)))
933 val = -val;
537730b9 934 if (val != 0)
b0109805 935 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
936 } else {
937 /* shift/register */
938 rm = (insn) & 0xf;
939 shift = (insn >> 7) & 0x1f;
1e8d4eec 940 shiftop = (insn >> 5) & 3;
b26eefb6 941 offset = load_reg(s, rm);
9a119ff6 942 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 943 if (!(insn & (1 << 23)))
b0109805 944 tcg_gen_sub_i32(var, var, offset);
2c0262af 945 else
b0109805 946 tcg_gen_add_i32(var, var, offset);
7d1b0095 947 tcg_temp_free_i32(offset);
2c0262af
FB
948 }
949}
950
191f9a93 951static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 952 int extra, TCGv_i32 var)
2c0262af
FB
953{
954 int val, rm;
39d5492a 955 TCGv_i32 offset;
3b46e624 956
2c0262af
FB
957 if (insn & (1 << 22)) {
958 /* immediate */
959 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
960 if (!(insn & (1 << 23)))
961 val = -val;
18acad92 962 val += extra;
537730b9 963 if (val != 0)
b0109805 964 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
965 } else {
966 /* register */
191f9a93 967 if (extra)
b0109805 968 tcg_gen_addi_i32(var, var, extra);
2c0262af 969 rm = (insn) & 0xf;
b26eefb6 970 offset = load_reg(s, rm);
2c0262af 971 if (!(insn & (1 << 23)))
b0109805 972 tcg_gen_sub_i32(var, var, offset);
2c0262af 973 else
b0109805 974 tcg_gen_add_i32(var, var, offset);
7d1b0095 975 tcg_temp_free_i32(offset);
2c0262af
FB
976 }
977}
978
5aaebd13
PM
979static TCGv_ptr get_fpstatus_ptr(int neon)
980{
981 TCGv_ptr statusptr = tcg_temp_new_ptr();
982 int offset;
983 if (neon) {
0ecb72a5 984 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 985 } else {
0ecb72a5 986 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
987 }
988 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
989 return statusptr;
990}
991
4373f3ce
PB
992#define VFP_OP2(name) \
993static inline void gen_vfp_##name(int dp) \
994{ \
ae1857ec
PM
995 TCGv_ptr fpst = get_fpstatus_ptr(0); \
996 if (dp) { \
997 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
998 } else { \
999 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1000 } \
1001 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1002}
1003
4373f3ce
PB
1004VFP_OP2(add)
1005VFP_OP2(sub)
1006VFP_OP2(mul)
1007VFP_OP2(div)
1008
1009#undef VFP_OP2
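/* Illustrative expansion (not part of the original source): VFP_OP2(add)
 * above defines
 *
 *   static inline void gen_vfp_add(int dp)
 *   {
 *       TCGv_ptr fpst = get_fpstatus_ptr(0);
 *       if (dp) {
 *           gen_helper_vfp_addd(cpu_F0d, cpu_F0d, cpu_F1d, fpst);
 *       } else {
 *           gen_helper_vfp_adds(cpu_F0s, cpu_F0s, cpu_F1s, fpst);
 *       }
 *       tcg_temp_free_ptr(fpst);
 *   }
 *
 * i.e. the operands are implicitly F0 and F1 and the result lands back in
 * F0, which is the calling convention used throughout the VFP decode. */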
1010
605a6aed
PM
1011static inline void gen_vfp_F1_mul(int dp)
1012{
1013 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1014 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1015 if (dp) {
ae1857ec 1016 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1017 } else {
ae1857ec 1018 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1019 }
ae1857ec 1020 tcg_temp_free_ptr(fpst);
605a6aed
PM
1021}
1022
1023static inline void gen_vfp_F1_neg(int dp)
1024{
1025 /* Like gen_vfp_neg() but put result in F1 */
1026 if (dp) {
1027 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1028 } else {
1029 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1030 }
1031}
1032
4373f3ce
PB
1033static inline void gen_vfp_abs(int dp)
1034{
1035 if (dp)
1036 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1037 else
1038 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1039}
1040
1041static inline void gen_vfp_neg(int dp)
1042{
1043 if (dp)
1044 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1045 else
1046 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1047}
1048
1049static inline void gen_vfp_sqrt(int dp)
1050{
1051 if (dp)
1052 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1053 else
1054 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1055}
1056
1057static inline void gen_vfp_cmp(int dp)
1058{
1059 if (dp)
1060 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1061 else
1062 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1063}
1064
1065static inline void gen_vfp_cmpe(int dp)
1066{
1067 if (dp)
1068 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1069 else
1070 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1071}
1072
1073static inline void gen_vfp_F1_ld0(int dp)
1074{
1075 if (dp)
5b340b51 1076 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1077 else
5b340b51 1078 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1079}
1080
5500b06c
PM
1081#define VFP_GEN_ITOF(name) \
1082static inline void gen_vfp_##name(int dp, int neon) \
1083{ \
5aaebd13 1084 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1085 if (dp) { \
1086 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1087 } else { \
1088 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1089 } \
b7fa9214 1090 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1091}
1092
5500b06c
PM
1093VFP_GEN_ITOF(uito)
1094VFP_GEN_ITOF(sito)
1095#undef VFP_GEN_ITOF
4373f3ce 1096
5500b06c
PM
1097#define VFP_GEN_FTOI(name) \
1098static inline void gen_vfp_##name(int dp, int neon) \
1099{ \
5aaebd13 1100 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1101 if (dp) { \
1102 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1103 } else { \
1104 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1105 } \
b7fa9214 1106 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1107}
1108
5500b06c
PM
1109VFP_GEN_FTOI(toui)
1110VFP_GEN_FTOI(touiz)
1111VFP_GEN_FTOI(tosi)
1112VFP_GEN_FTOI(tosiz)
1113#undef VFP_GEN_FTOI
4373f3ce
PB
1114
1115#define VFP_GEN_FIX(name) \
5500b06c 1116static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1117{ \
39d5492a 1118 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1119 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1120 if (dp) { \
1121 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1122 } else { \
1123 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1124 } \
b75263d6 1125 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1126 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1127}
4373f3ce
PB
1128VFP_GEN_FIX(tosh)
1129VFP_GEN_FIX(tosl)
1130VFP_GEN_FIX(touh)
1131VFP_GEN_FIX(toul)
1132VFP_GEN_FIX(shto)
1133VFP_GEN_FIX(slto)
1134VFP_GEN_FIX(uhto)
1135VFP_GEN_FIX(ulto)
1136#undef VFP_GEN_FIX
9ee6e8bb 1137
39d5492a 1138static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1139{
08307563
PM
1140 if (dp) {
1141 gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
1142 } else {
1143 gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
1144 }
b5ff1b31
FB
1145}
1146
39d5492a 1147static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1148{
08307563
PM
1149 if (dp) {
1150 gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
1151 } else {
1152 gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
1153 }
b5ff1b31
FB
1154}
1155
8e96005d
FB
1156static inline long
1157vfp_reg_offset (int dp, int reg)
1158{
1159 if (dp)
1160 return offsetof(CPUARMState, vfp.regs[reg]);
1161 else if (reg & 1) {
1162 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1163 + offsetof(CPU_DoubleU, l.upper);
1164 } else {
1165 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1166 + offsetof(CPU_DoubleU, l.lower);
1167 }
1168}
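/* Illustrative note (not part of the original source): single-precision
 * registers are stored as halves of the same CPU_DoubleU slots used for the
 * double-precision view, so e.g. reg 4 maps to vfp.regs[2].l.lower and reg 5
 * to vfp.regs[2].l.upper, while vfp_reg_offset(1, 2) addresses the whole
 * 64-bit vfp.regs[2]. */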
9ee6e8bb
PB
1169
1170/* Return the offset of a 32-bit piece of a NEON register.
1171 n = 0 is the least significant end of the register. */
1172static inline long
1173neon_reg_offset (int reg, int n)
1174{
1175 int sreg;
1176 sreg = reg * 2 + n;
1177 return vfp_reg_offset(0, sreg);
1178}
1179
39d5492a 1180static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1181{
39d5492a 1182 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1183 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1184 return tmp;
1185}
1186
39d5492a 1187static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1188{
1189 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1190 tcg_temp_free_i32(var);
8f8e3aa4
PB
1191}
1192
a7812ae4 1193static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1194{
1195 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1196}
1197
a7812ae4 1198static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1199{
1200 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1201}
1202
4373f3ce
PB
1203#define tcg_gen_ld_f32 tcg_gen_ld_i32
1204#define tcg_gen_ld_f64 tcg_gen_ld_i64
1205#define tcg_gen_st_f32 tcg_gen_st_i32
1206#define tcg_gen_st_f64 tcg_gen_st_i64
1207
b7bcbe95
FB
1208static inline void gen_mov_F0_vreg(int dp, int reg)
1209{
1210 if (dp)
4373f3ce 1211 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1212 else
4373f3ce 1213 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1214}
1215
1216static inline void gen_mov_F1_vreg(int dp, int reg)
1217{
1218 if (dp)
4373f3ce 1219 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1220 else
4373f3ce 1221 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1222}
1223
1224static inline void gen_mov_vreg_F0(int dp, int reg)
1225{
1226 if (dp)
4373f3ce 1227 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1228 else
4373f3ce 1229 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1230}
1231
18c9b560
AZ
1232#define ARM_CP_RW_BIT (1 << 20)
1233
a7812ae4 1234static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1235{
0ecb72a5 1236 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1237}
1238
a7812ae4 1239static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1240{
0ecb72a5 1241 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1242}
1243
39d5492a 1244static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1245{
39d5492a 1246 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1247 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1248 return var;
e677137d
PB
1249}
1250
39d5492a 1251static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1252{
0ecb72a5 1253 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1254 tcg_temp_free_i32(var);
e677137d
PB
1255}
1256
1257static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1258{
1259 iwmmxt_store_reg(cpu_M0, rn);
1260}
1261
1262static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1263{
1264 iwmmxt_load_reg(cpu_M0, rn);
1265}
1266
1267static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1268{
1269 iwmmxt_load_reg(cpu_V1, rn);
1270 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1271}
1272
1273static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1274{
1275 iwmmxt_load_reg(cpu_V1, rn);
1276 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1277}
1278
1279static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1280{
1281 iwmmxt_load_reg(cpu_V1, rn);
1282 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1283}
1284
1285#define IWMMXT_OP(name) \
1286static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1287{ \
1288 iwmmxt_load_reg(cpu_V1, rn); \
1289 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1290}
1291
477955bd
PM
1292#define IWMMXT_OP_ENV(name) \
1293static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1294{ \
1295 iwmmxt_load_reg(cpu_V1, rn); \
1296 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1297}
1298
1299#define IWMMXT_OP_ENV_SIZE(name) \
1300IWMMXT_OP_ENV(name##b) \
1301IWMMXT_OP_ENV(name##w) \
1302IWMMXT_OP_ENV(name##l)
e677137d 1303
477955bd 1304#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1305static inline void gen_op_iwmmxt_##name##_M0(void) \
1306{ \
477955bd 1307 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1308}
1309
1310IWMMXT_OP(maddsq)
1311IWMMXT_OP(madduq)
1312IWMMXT_OP(sadb)
1313IWMMXT_OP(sadw)
1314IWMMXT_OP(mulslw)
1315IWMMXT_OP(mulshw)
1316IWMMXT_OP(mululw)
1317IWMMXT_OP(muluhw)
1318IWMMXT_OP(macsw)
1319IWMMXT_OP(macuw)
1320
477955bd
PM
1321IWMMXT_OP_ENV_SIZE(unpackl)
1322IWMMXT_OP_ENV_SIZE(unpackh)
1323
1324IWMMXT_OP_ENV1(unpacklub)
1325IWMMXT_OP_ENV1(unpackluw)
1326IWMMXT_OP_ENV1(unpacklul)
1327IWMMXT_OP_ENV1(unpackhub)
1328IWMMXT_OP_ENV1(unpackhuw)
1329IWMMXT_OP_ENV1(unpackhul)
1330IWMMXT_OP_ENV1(unpacklsb)
1331IWMMXT_OP_ENV1(unpacklsw)
1332IWMMXT_OP_ENV1(unpacklsl)
1333IWMMXT_OP_ENV1(unpackhsb)
1334IWMMXT_OP_ENV1(unpackhsw)
1335IWMMXT_OP_ENV1(unpackhsl)
1336
1337IWMMXT_OP_ENV_SIZE(cmpeq)
1338IWMMXT_OP_ENV_SIZE(cmpgtu)
1339IWMMXT_OP_ENV_SIZE(cmpgts)
1340
1341IWMMXT_OP_ENV_SIZE(mins)
1342IWMMXT_OP_ENV_SIZE(minu)
1343IWMMXT_OP_ENV_SIZE(maxs)
1344IWMMXT_OP_ENV_SIZE(maxu)
1345
1346IWMMXT_OP_ENV_SIZE(subn)
1347IWMMXT_OP_ENV_SIZE(addn)
1348IWMMXT_OP_ENV_SIZE(subu)
1349IWMMXT_OP_ENV_SIZE(addu)
1350IWMMXT_OP_ENV_SIZE(subs)
1351IWMMXT_OP_ENV_SIZE(adds)
1352
1353IWMMXT_OP_ENV(avgb0)
1354IWMMXT_OP_ENV(avgb1)
1355IWMMXT_OP_ENV(avgw0)
1356IWMMXT_OP_ENV(avgw1)
e677137d
PB
1357
1358IWMMXT_OP(msadb)
1359
477955bd
PM
1360IWMMXT_OP_ENV(packuw)
1361IWMMXT_OP_ENV(packul)
1362IWMMXT_OP_ENV(packuq)
1363IWMMXT_OP_ENV(packsw)
1364IWMMXT_OP_ENV(packsl)
1365IWMMXT_OP_ENV(packsq)
e677137d 1366
e677137d
PB
1367static void gen_op_iwmmxt_set_mup(void)
1368{
39d5492a 1369 TCGv_i32 tmp;
e677137d
PB
1370 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1371 tcg_gen_ori_i32(tmp, tmp, 2);
1372 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1373}
1374
1375static void gen_op_iwmmxt_set_cup(void)
1376{
39d5492a 1377 TCGv_i32 tmp;
e677137d
PB
1378 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1379 tcg_gen_ori_i32(tmp, tmp, 1);
1380 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1381}
1382
1383static void gen_op_iwmmxt_setpsr_nz(void)
1384{
39d5492a 1385 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1386 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1387 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1388}
1389
1390static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1391{
1392 iwmmxt_load_reg(cpu_V1, rn);
86831435 1393 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1394 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1395}
1396
39d5492a
PM
1397static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1398 TCGv_i32 dest)
18c9b560
AZ
1399{
1400 int rd;
1401 uint32_t offset;
39d5492a 1402 TCGv_i32 tmp;
18c9b560
AZ
1403
1404 rd = (insn >> 16) & 0xf;
da6b5335 1405 tmp = load_reg(s, rd);
18c9b560
AZ
1406
1407 offset = (insn & 0xff) << ((insn >> 7) & 2);
1408 if (insn & (1 << 24)) {
1409 /* Pre indexed */
1410 if (insn & (1 << 23))
da6b5335 1411 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1412 else
da6b5335
FN
1413 tcg_gen_addi_i32(tmp, tmp, -offset);
1414 tcg_gen_mov_i32(dest, tmp);
18c9b560 1415 if (insn & (1 << 21))
da6b5335
FN
1416 store_reg(s, rd, tmp);
1417 else
7d1b0095 1418 tcg_temp_free_i32(tmp);
18c9b560
AZ
1419 } else if (insn & (1 << 21)) {
1420 /* Post indexed */
da6b5335 1421 tcg_gen_mov_i32(dest, tmp);
18c9b560 1422 if (insn & (1 << 23))
da6b5335 1423 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1424 else
da6b5335
FN
1425 tcg_gen_addi_i32(tmp, tmp, -offset);
1426 store_reg(s, rd, tmp);
18c9b560
AZ
1427 } else if (!(insn & (1 << 23)))
1428 return 1;
1429 return 0;
1430}
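/* Illustrative decode example (not part of the original source): for the
 * iwMMXt load/store forms handled above, the byte offset is
 * (insn & 0xff) << ((insn >> 7) & 2), i.e. the 8-bit immediate is scaled by
 * 4 when bit 8 is set.  With imm8 = 0x10, bit 8 set, P = 1 (bit 24),
 * U = 1 (bit 23) and W = 1 (bit 21), the effective address is Rn + 64 and
 * that value is also written back to Rn (pre-indexed with writeback). */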
1431
39d5492a 1432static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1433{
1434 int rd = (insn >> 0) & 0xf;
39d5492a 1435 TCGv_i32 tmp;
18c9b560 1436
da6b5335
FN
1437 if (insn & (1 << 8)) {
1438 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1439 return 1;
da6b5335
FN
1440 } else {
1441 tmp = iwmmxt_load_creg(rd);
1442 }
1443 } else {
7d1b0095 1444 tmp = tcg_temp_new_i32();
da6b5335
FN
1445 iwmmxt_load_reg(cpu_V0, rd);
1446 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1447 }
1448 tcg_gen_andi_i32(tmp, tmp, mask);
1449 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1450 tcg_temp_free_i32(tmp);
18c9b560
AZ
1451 return 0;
1452}
1453
a1c7273b 1454/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1455 (i.e. an undefined instruction). */
0ecb72a5 1456static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1457{
1458 int rd, wrd;
1459 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1460 TCGv_i32 addr;
1461 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1462
1463 if ((insn & 0x0e000e00) == 0x0c000000) {
1464 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1465 wrd = insn & 0xf;
1466 rdlo = (insn >> 12) & 0xf;
1467 rdhi = (insn >> 16) & 0xf;
1468 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1469 iwmmxt_load_reg(cpu_V0, wrd);
1470 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1471 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1472 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1473 } else { /* TMCRR */
da6b5335
FN
1474 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1475 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1476 gen_op_iwmmxt_set_mup();
1477 }
1478 return 0;
1479 }
1480
1481 wrd = (insn >> 12) & 0xf;
7d1b0095 1482 addr = tcg_temp_new_i32();
da6b5335 1483 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1484 tcg_temp_free_i32(addr);
18c9b560 1485 return 1;
da6b5335 1486 }
18c9b560
AZ
1487 if (insn & ARM_CP_RW_BIT) {
1488 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1489 tmp = tcg_temp_new_i32();
08307563 1490 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1491 iwmmxt_store_creg(wrd, tmp);
18c9b560 1492 } else {
e677137d
PB
1493 i = 1;
1494 if (insn & (1 << 8)) {
1495 if (insn & (1 << 22)) { /* WLDRD */
08307563 1496 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1497 i = 0;
1498 } else { /* WLDRW wRd */
29531141 1499 tmp = tcg_temp_new_i32();
08307563 1500 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1501 }
1502 } else {
29531141 1503 tmp = tcg_temp_new_i32();
e677137d 1504 if (insn & (1 << 22)) { /* WLDRH */
08307563 1505 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1506 } else { /* WLDRB */
08307563 1507 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1508 }
1509 }
1510 if (i) {
1511 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1512 tcg_temp_free_i32(tmp);
e677137d 1513 }
18c9b560
AZ
1514 gen_op_iwmmxt_movq_wRn_M0(wrd);
1515 }
1516 } else {
1517 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1518 tmp = iwmmxt_load_creg(wrd);
08307563 1519 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1520 } else {
1521 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1522 tmp = tcg_temp_new_i32();
e677137d
PB
1523 if (insn & (1 << 8)) {
1524 if (insn & (1 << 22)) { /* WSTRD */
08307563 1525 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1526 } else { /* WSTRW wRd */
1527 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1528 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1529 }
1530 } else {
1531 if (insn & (1 << 22)) { /* WSTRH */
1532 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1533 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1534 } else { /* WSTRB */
1535 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1536 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1537 }
1538 }
18c9b560 1539 }
29531141 1540 tcg_temp_free_i32(tmp);
18c9b560 1541 }
7d1b0095 1542 tcg_temp_free_i32(addr);
18c9b560
AZ
1543 return 0;
1544 }
1545
1546 if ((insn & 0x0f000000) != 0x0e000000)
1547 return 1;
1548
1549 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1550 case 0x000: /* WOR */
1551 wrd = (insn >> 12) & 0xf;
1552 rd0 = (insn >> 0) & 0xf;
1553 rd1 = (insn >> 16) & 0xf;
1554 gen_op_iwmmxt_movq_M0_wRn(rd0);
1555 gen_op_iwmmxt_orq_M0_wRn(rd1);
1556 gen_op_iwmmxt_setpsr_nz();
1557 gen_op_iwmmxt_movq_wRn_M0(wrd);
1558 gen_op_iwmmxt_set_mup();
1559 gen_op_iwmmxt_set_cup();
1560 break;
1561 case 0x011: /* TMCR */
1562 if (insn & 0xf)
1563 return 1;
1564 rd = (insn >> 12) & 0xf;
1565 wrd = (insn >> 16) & 0xf;
1566 switch (wrd) {
1567 case ARM_IWMMXT_wCID:
1568 case ARM_IWMMXT_wCASF:
1569 break;
1570 case ARM_IWMMXT_wCon:
1571 gen_op_iwmmxt_set_cup();
1572 /* Fall through. */
1573 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1574 tmp = iwmmxt_load_creg(wrd);
1575 tmp2 = load_reg(s, rd);
f669df27 1576 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1577 tcg_temp_free_i32(tmp2);
da6b5335 1578 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1579 break;
1580 case ARM_IWMMXT_wCGR0:
1581 case ARM_IWMMXT_wCGR1:
1582 case ARM_IWMMXT_wCGR2:
1583 case ARM_IWMMXT_wCGR3:
1584 gen_op_iwmmxt_set_cup();
da6b5335
FN
1585 tmp = load_reg(s, rd);
1586 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1587 break;
1588 default:
1589 return 1;
1590 }
1591 break;
1592 case 0x100: /* WXOR */
1593 wrd = (insn >> 12) & 0xf;
1594 rd0 = (insn >> 0) & 0xf;
1595 rd1 = (insn >> 16) & 0xf;
1596 gen_op_iwmmxt_movq_M0_wRn(rd0);
1597 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1598 gen_op_iwmmxt_setpsr_nz();
1599 gen_op_iwmmxt_movq_wRn_M0(wrd);
1600 gen_op_iwmmxt_set_mup();
1601 gen_op_iwmmxt_set_cup();
1602 break;
1603 case 0x111: /* TMRC */
1604 if (insn & 0xf)
1605 return 1;
1606 rd = (insn >> 12) & 0xf;
1607 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1608 tmp = iwmmxt_load_creg(wrd);
1609 store_reg(s, rd, tmp);
18c9b560
AZ
1610 break;
1611 case 0x300: /* WANDN */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1616 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1617 gen_op_iwmmxt_andq_M0_wRn(rd1);
1618 gen_op_iwmmxt_setpsr_nz();
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 gen_op_iwmmxt_set_cup();
1622 break;
1623 case 0x200: /* WAND */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
1628 gen_op_iwmmxt_andq_M0_wRn(rd1);
1629 gen_op_iwmmxt_setpsr_nz();
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x810: case 0xa10: /* WMADD */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 0) & 0xf;
1637 rd1 = (insn >> 16) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 switch ((insn >> 22) & 3) {
1652 case 0:
1653 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1654 break;
1655 case 1:
1656 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1657 break;
1658 case 2:
1659 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1660 break;
1661 case 3:
1662 return 1;
1663 }
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 if (insn & (1 << 22))
1696 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1697 else
1698 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1699 if (!(insn & (1 << 20)))
1700 gen_op_iwmmxt_addl_M0_wRn(wrd);
1701 gen_op_iwmmxt_movq_wRn_M0(wrd);
1702 gen_op_iwmmxt_set_mup();
1703 break;
1704 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1705 wrd = (insn >> 12) & 0xf;
1706 rd0 = (insn >> 16) & 0xf;
1707 rd1 = (insn >> 0) & 0xf;
1708 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1709 if (insn & (1 << 21)) {
1710 if (insn & (1 << 20))
1711 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1712 else
1713 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1714 } else {
1715 if (insn & (1 << 20))
1716 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1717 else
1718 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1719 }
18c9b560
AZ
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
1728 if (insn & (1 << 21))
1729 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1730 else
1731 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1732 if (!(insn & (1 << 20))) {
e677137d
PB
1733 iwmmxt_load_reg(cpu_V1, wrd);
1734 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1735 }
1736 gen_op_iwmmxt_movq_wRn_M0(wrd);
1737 gen_op_iwmmxt_set_mup();
1738 break;
1739 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1740 wrd = (insn >> 12) & 0xf;
1741 rd0 = (insn >> 16) & 0xf;
1742 rd1 = (insn >> 0) & 0xf;
1743 gen_op_iwmmxt_movq_M0_wRn(rd0);
1744 switch ((insn >> 22) & 3) {
1745 case 0:
1746 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1747 break;
1748 case 1:
1749 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1750 break;
1751 case 2:
1752 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1753 break;
1754 case 3:
1755 return 1;
1756 }
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 gen_op_iwmmxt_set_cup();
1760 break;
1761 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1766 if (insn & (1 << 22)) {
1767 if (insn & (1 << 20))
1768 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1769 else
1770 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1771 } else {
1772 if (insn & (1 << 20))
1773 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1774 else
1775 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1776 }
18c9b560
AZ
1777 gen_op_iwmmxt_movq_wRn_M0(wrd);
1778 gen_op_iwmmxt_set_mup();
1779 gen_op_iwmmxt_set_cup();
1780 break;
1781 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1782 wrd = (insn >> 12) & 0xf;
1783 rd0 = (insn >> 16) & 0xf;
1784 rd1 = (insn >> 0) & 0xf;
1785 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1786 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1787 tcg_gen_andi_i32(tmp, tmp, 7);
1788 iwmmxt_load_reg(cpu_V1, rd1);
1789 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1790 tcg_temp_free_i32(tmp);
18c9b560
AZ
1791 gen_op_iwmmxt_movq_wRn_M0(wrd);
1792 gen_op_iwmmxt_set_mup();
1793 break;
1794 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1795 if (((insn >> 6) & 3) == 3)
1796 return 1;
18c9b560
AZ
1797 rd = (insn >> 12) & 0xf;
1798 wrd = (insn >> 16) & 0xf;
da6b5335 1799 tmp = load_reg(s, rd);
18c9b560
AZ
1800 gen_op_iwmmxt_movq_M0_wRn(wrd);
1801 switch ((insn >> 6) & 3) {
1802 case 0:
da6b5335
FN
1803 tmp2 = tcg_const_i32(0xff);
1804 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1805 break;
1806 case 1:
da6b5335
FN
1807 tmp2 = tcg_const_i32(0xffff);
1808 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1809 break;
1810 case 2:
da6b5335
FN
1811 tmp2 = tcg_const_i32(0xffffffff);
1812 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1813 break;
da6b5335 1814 default:
39d5492a
PM
1815 TCGV_UNUSED_I32(tmp2);
1816 TCGV_UNUSED_I32(tmp3);
18c9b560 1817 }
da6b5335 1818 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1819 tcg_temp_free_i32(tmp3);
1820 tcg_temp_free_i32(tmp2);
7d1b0095 1821 tcg_temp_free_i32(tmp);
18c9b560
AZ
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1824 break;
1825 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1826 rd = (insn >> 12) & 0xf;
1827 wrd = (insn >> 16) & 0xf;
da6b5335 1828 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1829 return 1;
1830 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1831 tmp = tcg_temp_new_i32();
18c9b560
AZ
1832 switch ((insn >> 22) & 3) {
1833 case 0:
da6b5335
FN
1834 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1835 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1836 if (insn & 8) {
1837 tcg_gen_ext8s_i32(tmp, tmp);
1838 } else {
1839 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1840 }
1841 break;
1842 case 1:
da6b5335
FN
1843 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1844 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1845 if (insn & 8) {
1846 tcg_gen_ext16s_i32(tmp, tmp);
1847 } else {
1848 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1849 }
1850 break;
1851 case 2:
da6b5335
FN
1852 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1853 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1854 break;
18c9b560 1855 }
da6b5335 1856 store_reg(s, rd, tmp);
18c9b560
AZ
1857 break;
1858 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1859 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1860 return 1;
da6b5335 1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1862 switch ((insn >> 22) & 3) {
1863 case 0:
da6b5335 1864 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1865 break;
1866 case 1:
da6b5335 1867 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1868 break;
1869 case 2:
da6b5335 1870 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1871 break;
18c9b560 1872 }
da6b5335
FN
1873 tcg_gen_shli_i32(tmp, tmp, 28);
1874 gen_set_nzcv(tmp);
7d1b0095 1875 tcg_temp_free_i32(tmp);
18c9b560
AZ
1876 break;
1877 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1878 if (((insn >> 6) & 3) == 3)
1879 return 1;
18c9b560
AZ
1880 rd = (insn >> 12) & 0xf;
1881 wrd = (insn >> 16) & 0xf;
da6b5335 1882 tmp = load_reg(s, rd);
18c9b560
AZ
1883 switch ((insn >> 6) & 3) {
1884 case 0:
da6b5335 1885 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1886 break;
1887 case 1:
da6b5335 1888 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1889 break;
1890 case 2:
da6b5335 1891 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1892 break;
18c9b560 1893 }
7d1b0095 1894 tcg_temp_free_i32(tmp);
18c9b560
AZ
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 break;
1898 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1899 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1900 return 1;
da6b5335 1901 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1902 tmp2 = tcg_temp_new_i32();
da6b5335 1903 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1904 switch ((insn >> 22) & 3) {
1905 case 0:
1906 for (i = 0; i < 7; i++) {
da6b5335
FN
1907 tcg_gen_shli_i32(tmp2, tmp2, 4);
1908 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1909 }
1910 break;
1911 case 1:
1912 for (i = 0; i < 3; i++) {
da6b5335
FN
1913 tcg_gen_shli_i32(tmp2, tmp2, 8);
1914 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1915 }
1916 break;
1917 case 2:
da6b5335
FN
1918 tcg_gen_shli_i32(tmp2, tmp2, 16);
1919 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1920 break;
18c9b560 1921 }
da6b5335 1922 gen_set_nzcv(tmp);
7d1b0095
PM
1923 tcg_temp_free_i32(tmp2);
1924 tcg_temp_free_i32(tmp);
18c9b560
AZ
1925 break;
1926 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 gen_op_iwmmxt_movq_M0_wRn(rd0);
1930 switch ((insn >> 22) & 3) {
1931 case 0:
e677137d 1932 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1933 break;
1934 case 1:
e677137d 1935 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1936 break;
1937 case 2:
e677137d 1938 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1939 break;
1940 case 3:
1941 return 1;
1942 }
1943 gen_op_iwmmxt_movq_wRn_M0(wrd);
1944 gen_op_iwmmxt_set_mup();
1945 break;
1946 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1947 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1948 return 1;
da6b5335 1949 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1950 tmp2 = tcg_temp_new_i32();
da6b5335 1951 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1952 switch ((insn >> 22) & 3) {
1953 case 0:
1954 for (i = 0; i < 7; i++) {
da6b5335
FN
1955 tcg_gen_shli_i32(tmp2, tmp2, 4);
1956 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1957 }
1958 break;
1959 case 1:
1960 for (i = 0; i < 3; i++) {
da6b5335
FN
1961 tcg_gen_shli_i32(tmp2, tmp2, 8);
1962 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1963 }
1964 break;
1965 case 2:
da6b5335
FN
1966 tcg_gen_shli_i32(tmp2, tmp2, 16);
1967 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1968 break;
18c9b560 1969 }
da6b5335 1970 gen_set_nzcv(tmp);
7d1b0095
PM
1971 tcg_temp_free_i32(tmp2);
1972 tcg_temp_free_i32(tmp);
18c9b560
AZ
1973 break;
1974 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1975 rd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
da6b5335 1977 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1978 return 1;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1980 tmp = tcg_temp_new_i32();
18c9b560
AZ
1981 switch ((insn >> 22) & 3) {
1982 case 0:
da6b5335 1983 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1984 break;
1985 case 1:
da6b5335 1986 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1987 break;
1988 case 2:
da6b5335 1989 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1990 break;
18c9b560 1991 }
da6b5335 1992 store_reg(s, rd, tmp);
18c9b560
AZ
1993 break;
1994 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1995 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1996 wrd = (insn >> 12) & 0xf;
1997 rd0 = (insn >> 16) & 0xf;
1998 rd1 = (insn >> 0) & 0xf;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0);
2000 switch ((insn >> 22) & 3) {
2001 case 0:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2004 else
2005 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2006 break;
2007 case 1:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2010 else
2011 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2012 break;
2013 case 2:
2014 if (insn & (1 << 21))
2015 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2016 else
2017 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2018 break;
2019 case 3:
2020 return 1;
2021 }
2022 gen_op_iwmmxt_movq_wRn_M0(wrd);
2023 gen_op_iwmmxt_set_mup();
2024 gen_op_iwmmxt_set_cup();
2025 break;
2026 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2027 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2028 wrd = (insn >> 12) & 0xf;
2029 rd0 = (insn >> 16) & 0xf;
2030 gen_op_iwmmxt_movq_M0_wRn(rd0);
2031 switch ((insn >> 22) & 3) {
2032 case 0:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_unpacklsb_M0();
2035 else
2036 gen_op_iwmmxt_unpacklub_M0();
2037 break;
2038 case 1:
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_unpacklsw_M0();
2041 else
2042 gen_op_iwmmxt_unpackluw_M0();
2043 break;
2044 case 2:
2045 if (insn & (1 << 21))
2046 gen_op_iwmmxt_unpacklsl_M0();
2047 else
2048 gen_op_iwmmxt_unpacklul_M0();
2049 break;
2050 case 3:
2051 return 1;
2052 }
2053 gen_op_iwmmxt_movq_wRn_M0(wrd);
2054 gen_op_iwmmxt_set_mup();
2055 gen_op_iwmmxt_set_cup();
2056 break;
2057 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2058 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 switch ((insn >> 22) & 3) {
2063 case 0:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpackhsb_M0();
2066 else
2067 gen_op_iwmmxt_unpackhub_M0();
2068 break;
2069 case 1:
2070 if (insn & (1 << 21))
2071 gen_op_iwmmxt_unpackhsw_M0();
2072 else
2073 gen_op_iwmmxt_unpackhuw_M0();
2074 break;
2075 case 2:
2076 if (insn & (1 << 21))
2077 gen_op_iwmmxt_unpackhsl_M0();
2078 else
2079 gen_op_iwmmxt_unpackhul_M0();
2080 break;
2081 case 3:
2082 return 1;
2083 }
2084 gen_op_iwmmxt_movq_wRn_M0(wrd);
2085 gen_op_iwmmxt_set_mup();
2086 gen_op_iwmmxt_set_cup();
2087 break;
2088 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2089 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2090 if (((insn >> 22) & 3) == 0)
2091 return 1;
18c9b560
AZ
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2095 tmp = tcg_temp_new_i32();
da6b5335 2096 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2097 tcg_temp_free_i32(tmp);
18c9b560 2098 return 1;
da6b5335 2099 }
18c9b560 2100 switch ((insn >> 22) & 3) {
18c9b560 2101 case 1:
477955bd 2102 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 2:
477955bd 2105 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 3:
477955bd 2108 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2109 break;
2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2115 break;
2116 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2117 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2118 if (((insn >> 22) & 3) == 0)
2119 return 1;
18c9b560
AZ
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2123 tmp = tcg_temp_new_i32();
da6b5335 2124 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560 2126 return 1;
da6b5335 2127 }
18c9b560 2128 switch ((insn >> 22) & 3) {
18c9b560 2129 case 1:
477955bd 2130 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 2:
477955bd 2133 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2134 break;
2135 case 3:
477955bd 2136 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2137 break;
2138 }
7d1b0095 2139 tcg_temp_free_i32(tmp);
18c9b560
AZ
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 gen_op_iwmmxt_set_cup();
2143 break;
2144 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2145 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2146 if (((insn >> 22) & 3) == 0)
2147 return 1;
18c9b560
AZ
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2151 tmp = tcg_temp_new_i32();
da6b5335 2152 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2153 tcg_temp_free_i32(tmp);
18c9b560 2154 return 1;
da6b5335 2155 }
18c9b560 2156 switch ((insn >> 22) & 3) {
18c9b560 2157 case 1:
477955bd 2158 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2159 break;
2160 case 2:
477955bd 2161 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2162 break;
2163 case 3:
477955bd 2164 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2165 break;
2166 }
7d1b0095 2167 tcg_temp_free_i32(tmp);
18c9b560
AZ
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 gen_op_iwmmxt_set_cup();
2171 break;
2172 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2173 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2174 if (((insn >> 22) & 3) == 0)
2175 return 1;
18c9b560
AZ
2176 wrd = (insn >> 12) & 0xf;
2177 rd0 = (insn >> 16) & 0xf;
2178 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2179 tmp = tcg_temp_new_i32();
18c9b560 2180 switch ((insn >> 22) & 3) {
18c9b560 2181 case 1:
da6b5335 2182 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2183 tcg_temp_free_i32(tmp);
18c9b560 2184 return 1;
da6b5335 2185 }
477955bd 2186 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2187 break;
2188 case 2:
da6b5335 2189 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2190 tcg_temp_free_i32(tmp);
18c9b560 2191 return 1;
da6b5335 2192 }
477955bd 2193 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2194 break;
2195 case 3:
da6b5335 2196 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2197 tcg_temp_free_i32(tmp);
18c9b560 2198 return 1;
da6b5335 2199 }
477955bd 2200 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2201 break;
2202 }
7d1b0095 2203 tcg_temp_free_i32(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 gen_op_iwmmxt_set_cup();
2207 break;
2208 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2209 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2210 wrd = (insn >> 12) & 0xf;
2211 rd0 = (insn >> 16) & 0xf;
2212 rd1 = (insn >> 0) & 0xf;
2213 gen_op_iwmmxt_movq_M0_wRn(rd0);
2214 switch ((insn >> 22) & 3) {
2215 case 0:
2216 if (insn & (1 << 21))
2217 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2218 else
2219 gen_op_iwmmxt_minub_M0_wRn(rd1);
2220 break;
2221 case 1:
2222 if (insn & (1 << 21))
2223 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2224 else
2225 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2226 break;
2227 case 2:
2228 if (insn & (1 << 21))
2229 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2230 else
2231 gen_op_iwmmxt_minul_M0_wRn(rd1);
2232 break;
2233 case 3:
2234 return 1;
2235 }
2236 gen_op_iwmmxt_movq_wRn_M0(wrd);
2237 gen_op_iwmmxt_set_mup();
2238 break;
2239 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2240 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 rd1 = (insn >> 0) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 switch ((insn >> 22) & 3) {
2246 case 0:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2251 break;
2252 case 1:
2253 if (insn & (1 << 21))
2254 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2255 else
2256 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2257 break;
2258 case 2:
2259 if (insn & (1 << 21))
2260 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2261 else
2262 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2263 break;
2264 case 3:
2265 return 1;
2266 }
2267 gen_op_iwmmxt_movq_wRn_M0(wrd);
2268 gen_op_iwmmxt_set_mup();
2269 break;
2270 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2271 case 0x402: case 0x502: case 0x602: case 0x702:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2276 tmp = tcg_const_i32((insn >> 20) & 3);
2277 iwmmxt_load_reg(cpu_V1, rd1);
2278 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2279 tcg_temp_free_i32(tmp);
18c9b560
AZ
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2284 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2285 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2286 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2287 wrd = (insn >> 12) & 0xf;
2288 rd0 = (insn >> 16) & 0xf;
2289 rd1 = (insn >> 0) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
2291 switch ((insn >> 20) & 0xf) {
2292 case 0x0:
2293 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2294 break;
2295 case 0x1:
2296 gen_op_iwmmxt_subub_M0_wRn(rd1);
2297 break;
2298 case 0x3:
2299 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2300 break;
2301 case 0x4:
2302 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2303 break;
2304 case 0x5:
2305 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2306 break;
2307 case 0x7:
2308 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2309 break;
2310 case 0x8:
2311 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2312 break;
2313 case 0x9:
2314 gen_op_iwmmxt_subul_M0_wRn(rd1);
2315 break;
2316 case 0xb:
2317 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2318 break;
2319 default:
2320 return 1;
2321 }
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2324 gen_op_iwmmxt_set_cup();
2325 break;
2326 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2327 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2328 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2329 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2330 wrd = (insn >> 12) & 0xf;
2331 rd0 = (insn >> 16) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2333 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2334 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2335 tcg_temp_free_i32(tmp);
18c9b560
AZ
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2341 case 0x418: case 0x518: case 0x618: case 0x718:
2342 case 0x818: case 0x918: case 0xa18: case 0xb18:
2343 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 rd1 = (insn >> 0) & 0xf;
2347 gen_op_iwmmxt_movq_M0_wRn(rd0);
2348 switch ((insn >> 20) & 0xf) {
2349 case 0x0:
2350 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2351 break;
2352 case 0x1:
2353 gen_op_iwmmxt_addub_M0_wRn(rd1);
2354 break;
2355 case 0x3:
2356 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2357 break;
2358 case 0x4:
2359 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2360 break;
2361 case 0x5:
2362 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2363 break;
2364 case 0x7:
2365 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2366 break;
2367 case 0x8:
2368 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2369 break;
2370 case 0x9:
2371 gen_op_iwmmxt_addul_M0_wRn(rd1);
2372 break;
2373 case 0xb:
2374 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2375 break;
2376 default:
2377 return 1;
2378 }
2379 gen_op_iwmmxt_movq_wRn_M0(wrd);
2380 gen_op_iwmmxt_set_mup();
2381 gen_op_iwmmxt_set_cup();
2382 break;
2383 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2384 case 0x408: case 0x508: case 0x608: case 0x708:
2385 case 0x808: case 0x908: case 0xa08: case 0xb08:
2386 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2387 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2388 return 1;
18c9b560
AZ
2389 wrd = (insn >> 12) & 0xf;
2390 rd0 = (insn >> 16) & 0xf;
2391 rd1 = (insn >> 0) & 0xf;
2392 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2393 switch ((insn >> 22) & 3) {
18c9b560
AZ
2394 case 1:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2399 break;
2400 case 2:
2401 if (insn & (1 << 21))
2402 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2403 else
2404 gen_op_iwmmxt_packul_M0_wRn(rd1);
2405 break;
2406 case 3:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2411 break;
2412 }
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x201: case 0x203: case 0x205: case 0x207:
2418 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2419 case 0x211: case 0x213: case 0x215: case 0x217:
2420 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2421 wrd = (insn >> 5) & 0xf;
2422 rd0 = (insn >> 12) & 0xf;
2423 rd1 = (insn >> 0) & 0xf;
2424 if (rd0 == 0xf || rd1 == 0xf)
2425 return 1;
2426 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2427 tmp = load_reg(s, rd0);
2428 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2429 switch ((insn >> 16) & 0xf) {
2430 case 0x0: /* TMIA */
da6b5335 2431 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2432 break;
2433 case 0x8: /* TMIAPH */
da6b5335 2434 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2435 break;
2436 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2437 if (insn & (1 << 16))
da6b5335 2438 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2439 if (insn & (1 << 17))
da6b5335
FN
2440 tcg_gen_shri_i32(tmp2, tmp2, 16);
2441 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2442 break;
2443 default:
7d1b0095
PM
2444 tcg_temp_free_i32(tmp2);
2445 tcg_temp_free_i32(tmp);
18c9b560
AZ
2446 return 1;
2447 }
7d1b0095
PM
2448 tcg_temp_free_i32(tmp2);
2449 tcg_temp_free_i32(tmp);
18c9b560
AZ
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 break;
2453 default:
2454 return 1;
2455 }
2456
2457 return 0;
2458}
2459
a1c7273b 2460/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2461 (i.e. an undefined instruction). */
0ecb72a5 2462static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2463{
2464 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2465 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2466
2467 if ((insn & 0x0ff00f10) == 0x0e200010) {
2468 /* Multiply with Internal Accumulate Format */
2469 rd0 = (insn >> 12) & 0xf;
2470 rd1 = insn & 0xf;
2471 acc = (insn >> 5) & 7;
2472
2473 if (acc != 0)
2474 return 1;
2475
3a554c0f
FN
2476 tmp = load_reg(s, rd0);
2477 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2478 switch ((insn >> 16) & 0xf) {
2479 case 0x0: /* MIA */
3a554c0f 2480 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2481 break;
2482 case 0x8: /* MIAPH */
3a554c0f 2483 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2484 break;
2485 case 0xc: /* MIABB */
2486 case 0xd: /* MIABT */
2487 case 0xe: /* MIATB */
2488 case 0xf: /* MIATT */
18c9b560 2489 if (insn & (1 << 16))
3a554c0f 2490 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2491 if (insn & (1 << 17))
3a554c0f
FN
2492 tcg_gen_shri_i32(tmp2, tmp2, 16);
2493 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2494 break;
2495 default:
2496 return 1;
2497 }
7d1b0095
PM
2498 tcg_temp_free_i32(tmp2);
2499 tcg_temp_free_i32(tmp);
18c9b560
AZ
2500
2501 gen_op_iwmmxt_movq_wRn_M0(acc);
2502 return 0;
2503 }
2504
2505 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2506 /* Internal Accumulator Access Format */
2507 rdhi = (insn >> 16) & 0xf;
2508 rdlo = (insn >> 12) & 0xf;
2509 acc = insn & 7;
2510
2511 if (acc != 0)
2512 return 1;
2513
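 /* The XScale internal accumulator (acc0) is 40 bits wide: MRA copies
  * its low 32 bits to RdLo and bits [39:32] to RdHi (the mask below
  * keeps only those 8 bits), while MAR reloads it from the RdHi:RdLo
  * register pair.
  */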
2514 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2515 iwmmxt_load_reg(cpu_V0, acc);
2516 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2517 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2518 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2519 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2520 } else { /* MAR */
3a554c0f
FN
2521 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2522 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2523 }
2524 return 0;
2525 }
2526
2527 return 1;
2528}
2529
9ee6e8bb
PB
2530#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2531#define VFP_SREG(insn, bigbit, smallbit) \
2532 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2533#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2534 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2535 reg = (((insn) >> (bigbit)) & 0x0f) \
2536 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2537 } else { \
2538 if (insn & (1 << (smallbit))) \
2539 return 1; \
2540 reg = ((insn) >> (bigbit)) & 0x0f; \
2541 }} while (0)
2542
2543#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2544#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2545#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2546#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2547#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2548#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
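/* Worked example of the decode: with Vd (insn bits [15:12]) = 5 and the
 * D bit (insn bit 22) set, VFP_SREG_D yields S11 (5 * 2 + 1), while
 * VFP_DREG_D yields D21 (16 + 5) on a VFP3 core and UNDEFs on earlier
 * cores, which only have D0-D15 and require the extra bit to be zero.
 */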
2549
4373f3ce 2550/* Move between integer and VFP cores. */
39d5492a 2551static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2552{
39d5492a 2553 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2554 tcg_gen_mov_i32(tmp, cpu_F0s);
2555 return tmp;
2556}
2557
39d5492a 2558static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2559{
2560 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2561 tcg_temp_free_i32(tmp);
4373f3ce
PB
2562}
2563
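/* Duplicate the byte at bit position 'shift' of 'var' into all four byte
 * lanes, e.g. 0x000000ab -> 0xabababab for shift == 0; used when a single
 * element is broadcast for VDUP or for a replicating load.
 */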
39d5492a 2564static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2565{
39d5492a 2566 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2567 if (shift)
2568 tcg_gen_shri_i32(var, var, shift);
86831435 2569 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2570 tcg_gen_shli_i32(tmp, var, 8);
2571 tcg_gen_or_i32(var, var, tmp);
2572 tcg_gen_shli_i32(tmp, var, 16);
2573 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2574 tcg_temp_free_i32(tmp);
ad69471c
PB
2575}
2576
39d5492a 2577static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2578{
39d5492a 2579 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2580 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2581 tcg_gen_shli_i32(tmp, var, 16);
2582 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2583 tcg_temp_free_i32(tmp);
ad69471c
PB
2584}
2585
39d5492a 2586static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2587{
39d5492a 2588 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2589 tcg_gen_andi_i32(var, var, 0xffff0000);
2590 tcg_gen_shri_i32(tmp, var, 16);
2591 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2592 tcg_temp_free_i32(tmp);
ad69471c
PB
2593}
2594
39d5492a 2595static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2596{
2597 /* Load a single Neon element and replicate into a 32-bit TCG reg */
58ab8e96 2598 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2599 switch (size) {
2600 case 0:
08307563 2601 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2602 gen_neon_dup_u8(tmp, 0);
2603 break;
2604 case 1:
08307563 2605 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2606 gen_neon_dup_low16(tmp);
2607 break;
2608 case 2:
08307563 2609 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2610 break;
2611 default: /* Avoid compiler warnings. */
2612 abort();
2613 }
2614 return tmp;
2615}
2616
a1c7273b 2617/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2618 (i.e. an undefined instruction). */
0ecb72a5 2619static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2620{
2621 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2622 int dp, veclen;
39d5492a
PM
2623 TCGv_i32 addr;
2624 TCGv_i32 tmp;
2625 TCGv_i32 tmp2;
b7bcbe95 2626
40f137e1
PB
2627 if (!arm_feature(env, ARM_FEATURE_VFP))
2628 return 1;
2629
5df8bac1 2630 if (!s->vfp_enabled) {
9ee6e8bb 2631 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2632 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2633 return 1;
2634 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2635 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2636 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2637 return 1;
2638 }
6a57f3eb
WN
2639
2640 if (extract32(insn, 28, 4) == 0xf) {
2641 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2642 * only used in v8 and above.
2643 */
2644 return 1;
2645 }
2646
b7bcbe95
FB
2647 dp = ((insn & 0xf00) == 0xb00);
2648 switch ((insn >> 24) & 0xf) {
2649 case 0xe:
2650 if (insn & (1 << 4)) {
2651 /* single register transfer */
b7bcbe95
FB
2652 rd = (insn >> 12) & 0xf;
2653 if (dp) {
9ee6e8bb
PB
2654 int size;
2655 int pass;
2656
2657 VFP_DREG_N(rn, insn);
2658 if (insn & 0xf)
b7bcbe95 2659 return 1;
9ee6e8bb
PB
2660 if (insn & 0x00c00060
2661 && !arm_feature(env, ARM_FEATURE_NEON))
2662 return 1;
2663
2664 pass = (insn >> 21) & 1;
2665 if (insn & (1 << 22)) {
2666 size = 0;
2667 offset = ((insn >> 5) & 3) * 8;
2668 } else if (insn & (1 << 5)) {
2669 size = 1;
2670 offset = (insn & (1 << 6)) ? 16 : 0;
2671 } else {
2672 size = 2;
2673 offset = 0;
2674 }
18c9b560 2675 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2676 /* vfp->arm */
ad69471c 2677 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2678 switch (size) {
2679 case 0:
9ee6e8bb 2680 if (offset)
ad69471c 2681 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2682 if (insn & (1 << 23))
ad69471c 2683 gen_uxtb(tmp);
9ee6e8bb 2684 else
ad69471c 2685 gen_sxtb(tmp);
9ee6e8bb
PB
2686 break;
2687 case 1:
9ee6e8bb
PB
2688 if (insn & (1 << 23)) {
2689 if (offset) {
ad69471c 2690 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2691 } else {
ad69471c 2692 gen_uxth(tmp);
9ee6e8bb
PB
2693 }
2694 } else {
2695 if (offset) {
ad69471c 2696 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2697 } else {
ad69471c 2698 gen_sxth(tmp);
9ee6e8bb
PB
2699 }
2700 }
2701 break;
2702 case 2:
9ee6e8bb
PB
2703 break;
2704 }
ad69471c 2705 store_reg(s, rd, tmp);
b7bcbe95
FB
2706 } else {
2707 /* arm->vfp */
ad69471c 2708 tmp = load_reg(s, rd);
9ee6e8bb
PB
2709 if (insn & (1 << 23)) {
2710 /* VDUP */
2711 if (size == 0) {
ad69471c 2712 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2713 } else if (size == 1) {
ad69471c 2714 gen_neon_dup_low16(tmp);
9ee6e8bb 2715 }
cbbccffc 2716 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2717 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2718 tcg_gen_mov_i32(tmp2, tmp);
2719 neon_store_reg(rn, n, tmp2);
2720 }
2721 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2722 } else {
2723 /* VMOV */
2724 switch (size) {
2725 case 0:
ad69471c 2726 tmp2 = neon_load_reg(rn, pass);
d593c48e 2727 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2728 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2729 break;
2730 case 1:
ad69471c 2731 tmp2 = neon_load_reg(rn, pass);
d593c48e 2732 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2733 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2734 break;
2735 case 2:
9ee6e8bb
PB
2736 break;
2737 }
ad69471c 2738 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2739 }
b7bcbe95 2740 }
9ee6e8bb
PB
2741 } else { /* !dp */
2742 if ((insn & 0x6f) != 0x00)
2743 return 1;
2744 rn = VFP_SREG_N(insn);
18c9b560 2745 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2746 /* vfp->arm */
2747 if (insn & (1 << 21)) {
2748 /* system register */
40f137e1 2749 rn >>= 1;
9ee6e8bb 2750
b7bcbe95 2751 switch (rn) {
40f137e1 2752 case ARM_VFP_FPSID:
4373f3ce 2753 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2754 VFP3 restricts all id registers to privileged
2755 accesses. */
2756 if (IS_USER(s)
2757 && arm_feature(env, ARM_FEATURE_VFP3))
2758 return 1;
4373f3ce 2759 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2760 break;
40f137e1 2761 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2762 if (IS_USER(s))
2763 return 1;
4373f3ce 2764 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2765 break;
40f137e1
PB
2766 case ARM_VFP_FPINST:
2767 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2768 /* Not present in VFP3. */
2769 if (IS_USER(s)
2770 || arm_feature(env, ARM_FEATURE_VFP3))
2771 return 1;
4373f3ce 2772 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2773 break;
40f137e1 2774 case ARM_VFP_FPSCR:
601d70b9 2775 if (rd == 15) {
4373f3ce
PB
2776 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2777 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2778 } else {
7d1b0095 2779 tmp = tcg_temp_new_i32();
4373f3ce
PB
2780 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2781 }
b7bcbe95 2782 break;
9ee6e8bb
PB
2783 case ARM_VFP_MVFR0:
2784 case ARM_VFP_MVFR1:
2785 if (IS_USER(s)
06ed5d66 2786 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2787 return 1;
4373f3ce 2788 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2789 break;
b7bcbe95
FB
2790 default:
2791 return 1;
2792 }
2793 } else {
2794 gen_mov_F0_vreg(0, rn);
4373f3ce 2795 tmp = gen_vfp_mrs();
b7bcbe95
FB
2796 }
2797 if (rd == 15) {
b5ff1b31 2798 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2799 gen_set_nzcv(tmp);
7d1b0095 2800 tcg_temp_free_i32(tmp);
4373f3ce
PB
2801 } else {
2802 store_reg(s, rd, tmp);
2803 }
b7bcbe95
FB
2804 } else {
2805 /* arm->vfp */
b7bcbe95 2806 if (insn & (1 << 21)) {
40f137e1 2807 rn >>= 1;
b7bcbe95
FB
2808 /* system register */
2809 switch (rn) {
40f137e1 2810 case ARM_VFP_FPSID:
9ee6e8bb
PB
2811 case ARM_VFP_MVFR0:
2812 case ARM_VFP_MVFR1:
b7bcbe95
FB
2813 /* Writes are ignored. */
2814 break;
40f137e1 2815 case ARM_VFP_FPSCR:
e4c1cfa5 2816 tmp = load_reg(s, rd);
4373f3ce 2817 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2818 tcg_temp_free_i32(tmp);
b5ff1b31 2819 gen_lookup_tb(s);
b7bcbe95 2820 break;
40f137e1 2821 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2822 if (IS_USER(s))
2823 return 1;
71b3c3de
JR
2824 /* TODO: VFP subarchitecture support.
2825 * For now, keep the EN bit only */
e4c1cfa5 2826 tmp = load_reg(s, rd);
71b3c3de 2827 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2828 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2829 gen_lookup_tb(s);
2830 break;
2831 case ARM_VFP_FPINST:
2832 case ARM_VFP_FPINST2:
e4c1cfa5 2833 tmp = load_reg(s, rd);
4373f3ce 2834 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2835 break;
b7bcbe95
FB
2836 default:
2837 return 1;
2838 }
2839 } else {
e4c1cfa5 2840 tmp = load_reg(s, rd);
4373f3ce 2841 gen_vfp_msr(tmp);
b7bcbe95
FB
2842 gen_mov_vreg_F0(0, rn);
2843 }
2844 }
2845 }
2846 } else {
2847 /* data processing */
2848 /* The opcode is in bits 23, 21, 20 and 6. */
2849 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2850 if (dp) {
2851 if (op == 15) {
2852 /* rn is opcode */
2853 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2854 } else {
2855 /* rn is register number */
9ee6e8bb 2856 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2857 }
2858
04595bf6 2859 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2860 /* Integer or single precision destination. */
9ee6e8bb 2861 rd = VFP_SREG_D(insn);
b7bcbe95 2862 } else {
9ee6e8bb 2863 VFP_DREG_D(rd, insn);
b7bcbe95 2864 }
04595bf6
PM
2865 if (op == 15 &&
2866 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2867 /* VCVT from int is always from S reg regardless of dp bit.
2868 * VCVT with immediate frac_bits has same format as SREG_M
2869 */
2870 rm = VFP_SREG_M(insn);
b7bcbe95 2871 } else {
9ee6e8bb 2872 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2873 }
2874 } else {
9ee6e8bb 2875 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2876 if (op == 15 && rn == 15) {
2877 /* Double precision destination. */
9ee6e8bb
PB
2878 VFP_DREG_D(rd, insn);
2879 } else {
2880 rd = VFP_SREG_D(insn);
2881 }
04595bf6
PM
2882 /* NB that we implicitly rely on the encoding for the frac_bits
2883 * in VCVT of fixed to float being the same as that of an SREG_M
2884 */
9ee6e8bb 2885 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2886 }
2887
69d1fc22 2888 veclen = s->vec_len;
b7bcbe95
FB
2889 if (op == 15 && rn > 3)
2890 veclen = 0;
2891
2892 /* Shut up compiler warnings. */
2893 delta_m = 0;
2894 delta_d = 0;
2895 bank_mask = 0;
3b46e624 2896
b7bcbe95
FB
2897 if (veclen > 0) {
2898 if (dp)
2899 bank_mask = 0xc;
2900 else
2901 bank_mask = 0x18;
2902
2903 /* Figure out what type of vector operation this is. */
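 /* VFP short-vector semantics: the register file is split into banks
  * of four doubles or eight singles. A destination in bank 0 makes the
  * operation scalar; otherwise it is a vector op whose element stride
  * comes from FPSCR.STRIDE, and a source operand in bank 0 is a single
  * scalar reused for every element.
  */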
2904 if ((rd & bank_mask) == 0) {
2905 /* scalar */
2906 veclen = 0;
2907 } else {
2908 if (dp)
69d1fc22 2909 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2910 else
69d1fc22 2911 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2912
2913 if ((rm & bank_mask) == 0) {
2914 /* mixed scalar/vector */
2915 delta_m = 0;
2916 } else {
2917 /* vector */
2918 delta_m = delta_d;
2919 }
2920 }
2921 }
2922
2923 /* Load the initial operands. */
2924 if (op == 15) {
2925 switch (rn) {
2926 case 16:
2927 case 17:
2928 /* Integer source */
2929 gen_mov_F0_vreg(0, rm);
2930 break;
2931 case 8:
2932 case 9:
2933 /* Compare */
2934 gen_mov_F0_vreg(dp, rd);
2935 gen_mov_F1_vreg(dp, rm);
2936 break;
2937 case 10:
2938 case 11:
2939 /* Compare with zero */
2940 gen_mov_F0_vreg(dp, rd);
2941 gen_vfp_F1_ld0(dp);
2942 break;
9ee6e8bb
PB
2943 case 20:
2944 case 21:
2945 case 22:
2946 case 23:
644ad806
PB
2947 case 28:
2948 case 29:
2949 case 30:
2950 case 31:
9ee6e8bb
PB
2951 /* Source and destination the same. */
2952 gen_mov_F0_vreg(dp, rd);
2953 break;
6e0c0ed1
PM
2954 case 4:
2955 case 5:
2956 case 6:
2957 case 7:
2958 /* VCVTB, VCVTT: only present with the halfprec extension,
2959 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2960 */
2961 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2962 return 1;
2963 }
2964 /* Otherwise fall through */
b7bcbe95
FB
2965 default:
2966 /* One source operand. */
2967 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2968 break;
b7bcbe95
FB
2969 }
2970 } else {
2971 /* Two source operands. */
2972 gen_mov_F0_vreg(dp, rn);
2973 gen_mov_F1_vreg(dp, rm);
2974 }
2975
2976 for (;;) {
2977 /* Perform the calculation. */
2978 switch (op) {
605a6aed
PM
2979 case 0: /* VMLA: fd + (fn * fm) */
2980 /* Note that order of inputs to the add matters for NaNs */
2981 gen_vfp_F1_mul(dp);
2982 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2983 gen_vfp_add(dp);
2984 break;
605a6aed 2985 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2986 gen_vfp_mul(dp);
605a6aed
PM
2987 gen_vfp_F1_neg(dp);
2988 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2989 gen_vfp_add(dp);
2990 break;
605a6aed
PM
2991 case 2: /* VNMLS: -fd + (fn * fm) */
2992 /* Note that it isn't valid to replace (-A + B) with (B - A)
2993 * or similar plausible looking simplifications
2994 * because this will give wrong results for NaNs.
2995 */
2996 gen_vfp_F1_mul(dp);
2997 gen_mov_F0_vreg(dp, rd);
2998 gen_vfp_neg(dp);
2999 gen_vfp_add(dp);
b7bcbe95 3000 break;
605a6aed 3001 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3002 gen_vfp_mul(dp);
605a6aed
PM
3003 gen_vfp_F1_neg(dp);
3004 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3005 gen_vfp_neg(dp);
605a6aed 3006 gen_vfp_add(dp);
b7bcbe95
FB
3007 break;
3008 case 4: /* mul: fn * fm */
3009 gen_vfp_mul(dp);
3010 break;
3011 case 5: /* nmul: -(fn * fm) */
3012 gen_vfp_mul(dp);
3013 gen_vfp_neg(dp);
3014 break;
3015 case 6: /* add: fn + fm */
3016 gen_vfp_add(dp);
3017 break;
3018 case 7: /* sub: fn - fm */
3019 gen_vfp_sub(dp);
3020 break;
3021 case 8: /* div: fn / fm */
3022 gen_vfp_div(dp);
3023 break;
da97f52c
PM
3024 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3025 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3026 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3027 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3028 /* These are fused multiply-add, and must be done as one
3029 * floating point operation with no rounding between the
3030 * multiplication and addition steps.
3031 * NB that doing the negations here as separate steps is
3032 * correct: an input NaN should come out with its sign bit
3033 * flipped if it is a negated input.
3034 */
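 /* Concretely: op bit 0 (VFMS/VFNMS) negates fn before the multiply,
  * op bit 1 (VFNMA/VFNMS) negates the fd addend, and the muladd helper
  * then computes fn * fm + fd with a single rounding step.
  */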
3035 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3036 return 1;
3037 }
3038 if (dp) {
3039 TCGv_ptr fpst;
3040 TCGv_i64 frd;
3041 if (op & 1) {
3042 /* VFNMS, VFMS */
3043 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3044 }
3045 frd = tcg_temp_new_i64();
3046 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3047 if (op & 2) {
3048 /* VFNMA, VFNMS */
3049 gen_helper_vfp_negd(frd, frd);
3050 }
3051 fpst = get_fpstatus_ptr(0);
3052 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3053 cpu_F1d, frd, fpst);
3054 tcg_temp_free_ptr(fpst);
3055 tcg_temp_free_i64(frd);
3056 } else {
3057 TCGv_ptr fpst;
3058 TCGv_i32 frd;
3059 if (op & 1) {
3060 /* VFNMS, VFMS */
3061 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3062 }
3063 frd = tcg_temp_new_i32();
3064 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3065 if (op & 2) {
3066 gen_helper_vfp_negs(frd, frd);
3067 }
3068 fpst = get_fpstatus_ptr(0);
3069 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3070 cpu_F1s, frd, fpst);
3071 tcg_temp_free_ptr(fpst);
3072 tcg_temp_free_i32(frd);
3073 }
3074 break;
9ee6e8bb
PB
3075 case 14: /* fconst */
3076 if (!arm_feature(env, ARM_FEATURE_VFP3))
3077 return 1;
3078
3079 n = (insn << 12) & 0x80000000;
3080 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3081 if (dp) {
3082 if (i & 0x40)
3083 i |= 0x3f80;
3084 else
3085 i |= 0x4000;
3086 n |= i << 16;
4373f3ce 3087 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3088 } else {
3089 if (i & 0x40)
3090 i |= 0x780;
3091 else
3092 i |= 0x800;
3093 n |= i << 19;
5b340b51 3094 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3095 }
9ee6e8bb 3096 break;
b7bcbe95
FB
3097 case 15: /* extension space */
3098 switch (rn) {
3099 case 0: /* cpy */
3100 /* no-op */
3101 break;
3102 case 1: /* abs */
3103 gen_vfp_abs(dp);
3104 break;
3105 case 2: /* neg */
3106 gen_vfp_neg(dp);
3107 break;
3108 case 3: /* sqrt */
3109 gen_vfp_sqrt(dp);
3110 break;
60011498 3111 case 4: /* vcvtb.f32.f16 */
60011498
PB
3112 tmp = gen_vfp_mrs();
3113 tcg_gen_ext16u_i32(tmp, tmp);
3114 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3115 tcg_temp_free_i32(tmp);
60011498
PB
3116 break;
3117 case 5: /* vcvtt.f32.f16 */
60011498
PB
3118 tmp = gen_vfp_mrs();
3119 tcg_gen_shri_i32(tmp, tmp, 16);
3120 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3121 tcg_temp_free_i32(tmp);
60011498
PB
3122 break;
3123 case 6: /* vcvtb.f16.f32 */
7d1b0095 3124 tmp = tcg_temp_new_i32();
60011498
PB
3125 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3126 gen_mov_F0_vreg(0, rd);
3127 tmp2 = gen_vfp_mrs();
3128 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3129 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3130 tcg_temp_free_i32(tmp2);
60011498
PB
3131 gen_vfp_msr(tmp);
3132 break;
3133 case 7: /* vcvtt.f16.f32 */
7d1b0095 3134 tmp = tcg_temp_new_i32();
60011498
PB
3135 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3136 tcg_gen_shli_i32(tmp, tmp, 16);
3137 gen_mov_F0_vreg(0, rd);
3138 tmp2 = gen_vfp_mrs();
3139 tcg_gen_ext16u_i32(tmp2, tmp2);
3140 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3141 tcg_temp_free_i32(tmp2);
60011498
PB
3142 gen_vfp_msr(tmp);
3143 break;
b7bcbe95
FB
3144 case 8: /* cmp */
3145 gen_vfp_cmp(dp);
3146 break;
3147 case 9: /* cmpe */
3148 gen_vfp_cmpe(dp);
3149 break;
3150 case 10: /* cmpz */
3151 gen_vfp_cmp(dp);
3152 break;
3153 case 11: /* cmpez */
3154 gen_vfp_F1_ld0(dp);
3155 gen_vfp_cmpe(dp);
3156 break;
3157 case 15: /* single<->double conversion */
3158 if (dp)
4373f3ce 3159 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3160 else
4373f3ce 3161 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3162 break;
3163 case 16: /* fuito */
5500b06c 3164 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3165 break;
3166 case 17: /* fsito */
5500b06c 3167 gen_vfp_sito(dp, 0);
b7bcbe95 3168 break;
9ee6e8bb
PB
3169 case 20: /* fshto */
3170 if (!arm_feature(env, ARM_FEATURE_VFP3))
3171 return 1;
5500b06c 3172 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3173 break;
3174 case 21: /* fslto */
3175 if (!arm_feature(env, ARM_FEATURE_VFP3))
3176 return 1;
5500b06c 3177 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3178 break;
3179 case 22: /* fuhto */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
5500b06c 3182 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3183 break;
3184 case 23: /* fulto */
3185 if (!arm_feature(env, ARM_FEATURE_VFP3))
3186 return 1;
5500b06c 3187 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3188 break;
b7bcbe95 3189 case 24: /* ftoui */
5500b06c 3190 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3191 break;
3192 case 25: /* ftouiz */
5500b06c 3193 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3194 break;
3195 case 26: /* ftosi */
5500b06c 3196 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3197 break;
3198 case 27: /* ftosiz */
5500b06c 3199 gen_vfp_tosiz(dp, 0);
b7bcbe95 3200 break;
9ee6e8bb
PB
3201 case 28: /* ftosh */
3202 if (!arm_feature(env, ARM_FEATURE_VFP3))
3203 return 1;
5500b06c 3204 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3205 break;
3206 case 29: /* ftosl */
3207 if (!arm_feature(env, ARM_FEATURE_VFP3))
3208 return 1;
5500b06c 3209 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3210 break;
3211 case 30: /* ftouh */
3212 if (!arm_feature(env, ARM_FEATURE_VFP3))
3213 return 1;
5500b06c 3214 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3215 break;
3216 case 31: /* ftoul */
3217 if (!arm_feature(env, ARM_FEATURE_VFP3))
3218 return 1;
5500b06c 3219 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3220 break;
b7bcbe95 3221 default: /* undefined */
b7bcbe95
FB
3222 return 1;
3223 }
3224 break;
3225 default: /* undefined */
b7bcbe95
FB
3226 return 1;
3227 }
3228
3229 /* Write back the result. */
3230 if (op == 15 && (rn >= 8 && rn <= 11))
3231 ; /* Comparison, do nothing. */
04595bf6
PM
3232 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3233 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3234 gen_mov_vreg_F0(0, rd);
3235 else if (op == 15 && rn == 15)
3236 /* conversion */
3237 gen_mov_vreg_F0(!dp, rd);
3238 else
3239 gen_mov_vreg_F0(dp, rd);
3240
3241 /* break out of the loop if we have finished */
3242 if (veclen == 0)
3243 break;
3244
3245 if (op == 15 && delta_m == 0) {
3246 /* single source one-many */
3247 while (veclen--) {
3248 rd = ((rd + delta_d) & (bank_mask - 1))
3249 | (rd & bank_mask);
3250 gen_mov_vreg_F0(dp, rd);
3251 }
3252 break;
3253 }
3254 /* Set up the next operands. */
3255 veclen--;
3256 rd = ((rd + delta_d) & (bank_mask - 1))
3257 | (rd & bank_mask);
3258
3259 if (op == 15) {
3260 /* One source operand. */
3261 rm = ((rm + delta_m) & (bank_mask - 1))
3262 | (rm & bank_mask);
3263 gen_mov_F0_vreg(dp, rm);
3264 } else {
3265 /* Two source operands. */
3266 rn = ((rn + delta_d) & (bank_mask - 1))
3267 | (rn & bank_mask);
3268 gen_mov_F0_vreg(dp, rn);
3269 if (delta_m) {
3270 rm = ((rm + delta_m) & (bank_mask - 1))
3271 | (rm & bank_mask);
3272 gen_mov_F1_vreg(dp, rm);
3273 }
3274 }
3275 }
3276 }
3277 break;
3278 case 0xc:
3279 case 0xd:
8387da81 3280 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3281 /* two-register transfer */
3282 rn = (insn >> 16) & 0xf;
3283 rd = (insn >> 12) & 0xf;
3284 if (dp) {
9ee6e8bb
PB
3285 VFP_DREG_M(rm, insn);
3286 } else {
3287 rm = VFP_SREG_M(insn);
3288 }
b7bcbe95 3289
18c9b560 3290 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3291 /* vfp->arm */
3292 if (dp) {
4373f3ce
PB
3293 gen_mov_F0_vreg(0, rm * 2);
3294 tmp = gen_vfp_mrs();
3295 store_reg(s, rd, tmp);
3296 gen_mov_F0_vreg(0, rm * 2 + 1);
3297 tmp = gen_vfp_mrs();
3298 store_reg(s, rn, tmp);
b7bcbe95
FB
3299 } else {
3300 gen_mov_F0_vreg(0, rm);
4373f3ce 3301 tmp = gen_vfp_mrs();
8387da81 3302 store_reg(s, rd, tmp);
b7bcbe95 3303 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3304 tmp = gen_vfp_mrs();
8387da81 3305 store_reg(s, rn, tmp);
b7bcbe95
FB
3306 }
3307 } else {
3308 /* arm->vfp */
3309 if (dp) {
4373f3ce
PB
3310 tmp = load_reg(s, rd);
3311 gen_vfp_msr(tmp);
3312 gen_mov_vreg_F0(0, rm * 2);
3313 tmp = load_reg(s, rn);
3314 gen_vfp_msr(tmp);
3315 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3316 } else {
8387da81 3317 tmp = load_reg(s, rd);
4373f3ce 3318 gen_vfp_msr(tmp);
b7bcbe95 3319 gen_mov_vreg_F0(0, rm);
8387da81 3320 tmp = load_reg(s, rn);
4373f3ce 3321 gen_vfp_msr(tmp);
b7bcbe95
FB
3322 gen_mov_vreg_F0(0, rm + 1);
3323 }
3324 }
3325 } else {
3326 /* Load/store */
3327 rn = (insn >> 16) & 0xf;
3328 if (dp)
9ee6e8bb 3329 VFP_DREG_D(rd, insn);
b7bcbe95 3330 else
9ee6e8bb 3331 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3332 if ((insn & 0x01200000) == 0x01000000) {
3333 /* Single load/store */
3334 offset = (insn & 0xff) << 2;
3335 if ((insn & (1 << 23)) == 0)
3336 offset = -offset;
934814f1
PM
3337 if (s->thumb && rn == 15) {
3338 /* This is actually UNPREDICTABLE */
3339 addr = tcg_temp_new_i32();
3340 tcg_gen_movi_i32(addr, s->pc & ~2);
3341 } else {
3342 addr = load_reg(s, rn);
3343 }
312eea9f 3344 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3345 if (insn & (1 << 20)) {
312eea9f 3346 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3347 gen_mov_vreg_F0(dp, rd);
3348 } else {
3349 gen_mov_F0_vreg(dp, rd);
312eea9f 3350 gen_vfp_st(s, dp, addr);
b7bcbe95 3351 }
7d1b0095 3352 tcg_temp_free_i32(addr);
b7bcbe95
FB
3353 } else {
3354 /* load/store multiple */
934814f1 3355 int w = insn & (1 << 21);
b7bcbe95
FB
3356 if (dp)
3357 n = (insn >> 1) & 0x7f;
3358 else
3359 n = insn & 0xff;
3360
934814f1
PM
3361 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3362 /* P == U , W == 1 => UNDEF */
3363 return 1;
3364 }
3365 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3366 /* UNPREDICTABLE cases for bad immediates: we choose to
3367 * UNDEF to avoid generating huge numbers of TCG ops
3368 */
3369 return 1;
3370 }
3371 if (rn == 15 && w) {
3372 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3373 return 1;
3374 }
3375
3376 if (s->thumb && rn == 15) {
3377 /* This is actually UNPREDICTABLE */
3378 addr = tcg_temp_new_i32();
3379 tcg_gen_movi_i32(addr, s->pc & ~2);
3380 } else {
3381 addr = load_reg(s, rn);
3382 }
b7bcbe95 3383 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3384 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3385
3386 if (dp)
3387 offset = 8;
3388 else
3389 offset = 4;
3390 for (i = 0; i < n; i++) {
18c9b560 3391 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3392 /* load */
312eea9f 3393 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3394 gen_mov_vreg_F0(dp, rd + i);
3395 } else {
3396 /* store */
3397 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3398 gen_vfp_st(s, dp, addr);
b7bcbe95 3399 }
312eea9f 3400 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3401 }
934814f1 3402 if (w) {
b7bcbe95
FB
3403 /* writeback */
3404 if (insn & (1 << 24))
3405 offset = -offset * n;
3406 else if (dp && (insn & 1))
3407 offset = 4;
3408 else
3409 offset = 0;
3410
3411 if (offset != 0)
312eea9f
FN
3412 tcg_gen_addi_i32(addr, addr, offset);
3413 store_reg(s, rn, addr);
3414 } else {
7d1b0095 3415 tcg_temp_free_i32(addr);
b7bcbe95
FB
3416 }
3417 }
3418 }
3419 break;
3420 default:
3421 /* Should never happen. */
3422 return 1;
3423 }
3424 return 0;
3425}
3426
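/* Jump to 'dest': direct block chaining via goto_tb is only used when the
 * destination lies on the same guest page as this TB; otherwise the PC is
 * written back and we return to the main loop so the target is looked up
 * afresh.
 */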
0a2461fa 3427static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3428{
6e256c93
FB
3429 TranslationBlock *tb;
3430
3431 tb = s->tb;
3432 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3433 tcg_gen_goto_tb(n);
eaed129d 3434 gen_set_pc_im(s, dest);
8cfd0495 3435 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3436 } else {
eaed129d 3437 gen_set_pc_im(s, dest);
57fec1fe 3438 tcg_gen_exit_tb(0);
6e256c93 3439 }
c53be334
FB
3440}
3441
8aaca4c0
FB
3442static inline void gen_jmp(DisasContext *s, uint32_t dest)
3443{
551bd27f 3444 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3445 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3446 if (s->thumb)
d9ba4830
PB
3447 dest |= 1;
3448 gen_bx_im(s, dest);
8aaca4c0 3449 } else {
6e256c93 3450 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3451 s->is_jmp = DISAS_TB_JUMP;
3452 }
3453}
3454
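/* 16 x 16 -> 32 signed multiply used by the SMULxy/SMLAxy family: x and y
 * select the top (1) or bottom (0) halfword of t0 and t1 respectively,
 * sign-extending the chosen half before the multiply.
 */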
39d5492a 3455static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3456{
ee097184 3457 if (x)
d9ba4830 3458 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3459 else
d9ba4830 3460 gen_sxth(t0);
ee097184 3461 if (y)
d9ba4830 3462 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3463 else
d9ba4830
PB
3464 gen_sxth(t1);
3465 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3466}
3467
3468/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3469static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3470 uint32_t mask;
3471
3472 mask = 0;
3473 if (flags & (1 << 0))
3474 mask |= 0xff;
3475 if (flags & (1 << 1))
3476 mask |= 0xff00;
3477 if (flags & (1 << 2))
3478 mask |= 0xff0000;
3479 if (flags & (1 << 3))
3480 mask |= 0xff000000;
9ee6e8bb 3481
2ae23e75 3482 /* Mask out undefined bits. */
9ee6e8bb 3483 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3484 if (!arm_feature(env, ARM_FEATURE_V4T))
3485 mask &= ~CPSR_T;
3486 if (!arm_feature(env, ARM_FEATURE_V5))
3487 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3488 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3489 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3490 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3491 mask &= ~CPSR_IT;
9ee6e8bb 3492 /* Mask out execution state bits. */
2ae23e75 3493 if (!spsr)
e160c51c 3494 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3495 /* Mask out privileged bits. */
3496 if (IS_USER(s))
9ee6e8bb 3497 mask &= CPSR_USER;
b5ff1b31
FB
3498 return mask;
3499}
3500
2fbac54b 3501/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3502static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3503{
39d5492a 3504 TCGv_i32 tmp;
b5ff1b31
FB
3505 if (spsr) {
3506 /* ??? This is also undefined in system mode. */
3507 if (IS_USER(s))
3508 return 1;
d9ba4830
PB
3509
3510 tmp = load_cpu_field(spsr);
3511 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3512 tcg_gen_andi_i32(t0, t0, mask);
3513 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3514 store_cpu_field(tmp, spsr);
b5ff1b31 3515 } else {
2fbac54b 3516 gen_set_cpsr(t0, mask);
b5ff1b31 3517 }
7d1b0095 3518 tcg_temp_free_i32(t0);
b5ff1b31
FB
3519 gen_lookup_tb(s);
3520 return 0;
3521}
3522
2fbac54b
FN
3523/* Returns nonzero if access to the PSR is not permitted. */
3524static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3525{
39d5492a 3526 TCGv_i32 tmp;
7d1b0095 3527 tmp = tcg_temp_new_i32();
2fbac54b
FN
3528 tcg_gen_movi_i32(tmp, val);
3529 return gen_set_psr(s, mask, spsr, tmp);
3530}
3531
e9bb4aa9 3532/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3533static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3534{
39d5492a 3535 TCGv_i32 tmp;
e9bb4aa9 3536 store_reg(s, 15, pc);
d9ba4830
PB
3537 tmp = load_cpu_field(spsr);
3538 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3539 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3540 s->is_jmp = DISAS_UPDATE;
3541}
3542
b0109805 3543/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3544static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3545{
b0109805 3546 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3547 tcg_temp_free_i32(cpsr);
b0109805 3548 store_reg(s, 15, pc);
9ee6e8bb
PB
3549 s->is_jmp = DISAS_UPDATE;
3550}
3b46e624 3551
9ee6e8bb
PB
3552static inline void
3553gen_set_condexec(DisasContext *s)
3554{
3555 if (s->condexec_mask) {
8f01245e 3556 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3557 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3558 tcg_gen_movi_i32(tmp, val);
d9ba4830 3559 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3560 }
3561}
3b46e624 3562
bc4a0de0
PM
3563static void gen_exception_insn(DisasContext *s, int offset, int excp)
3564{
3565 gen_set_condexec(s);
eaed129d 3566 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3567 gen_exception(excp);
3568 s->is_jmp = DISAS_JUMP;
3569}
3570
9ee6e8bb
PB
3571static void gen_nop_hint(DisasContext *s, int val)
3572{
3573 switch (val) {
3574 case 3: /* wfi */
eaed129d 3575 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3576 s->is_jmp = DISAS_WFI;
3577 break;
3578 case 2: /* wfe */
3579 case 4: /* sev */
12b10571
MR
3580 case 5: /* sevl */
3581 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3582 default: /* nop */
3583 break;
3584 }
3585}
99c475ab 3586
ad69471c 3587#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3588
39d5492a 3589static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3590{
3591 switch (size) {
dd8fbd78
FN
3592 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3593 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3594 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3595 default: abort();
9ee6e8bb 3596 }
9ee6e8bb
PB
3597}
3598
39d5492a 3599static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3600{
3601 switch (size) {
dd8fbd78
FN
3602 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3603 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3604 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3605 default: return;
3606 }
3607}
3608
3609/* 32-bit pairwise ops end up the same as the elementwise versions. */
3610#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3611#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3612#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3613#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3614
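/* Dispatch to the signed/unsigned 8/16/32-bit variant of a Neon integer
 * helper: the switch index (size << 1) | u selects _s8, _u8, _s16, _u16,
 * _s32 or _u32, and any other combination makes the insn UNDEF.
 */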
ad69471c
PB
3615#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3616 switch ((size << 1) | u) { \
3617 case 0: \
dd8fbd78 3618 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3619 break; \
3620 case 1: \
dd8fbd78 3621 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3622 break; \
3623 case 2: \
dd8fbd78 3624 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3625 break; \
3626 case 3: \
dd8fbd78 3627 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3628 break; \
3629 case 4: \
dd8fbd78 3630 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3631 break; \
3632 case 5: \
dd8fbd78 3633 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3634 break; \
3635 default: return 1; \
3636 }} while (0)
9ee6e8bb
PB
3637
3638#define GEN_NEON_INTEGER_OP(name) do { \
3639 switch ((size << 1) | u) { \
ad69471c 3640 case 0: \
dd8fbd78 3641 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3642 break; \
3643 case 1: \
dd8fbd78 3644 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3645 break; \
3646 case 2: \
dd8fbd78 3647 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3648 break; \
3649 case 3: \
dd8fbd78 3650 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3651 break; \
3652 case 4: \
dd8fbd78 3653 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3654 break; \
3655 case 5: \
dd8fbd78 3656 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3657 break; \
9ee6e8bb
PB
3658 default: return 1; \
3659 }} while (0)
3660
39d5492a 3661static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3662{
39d5492a 3663 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3664 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3665 return tmp;
9ee6e8bb
PB
3666}
3667
39d5492a 3668static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3669{
dd8fbd78 3670 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3671 tcg_temp_free_i32(var);
9ee6e8bb
PB
3672}
3673
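/* Fetch the scalar operand of a "by scalar" Neon op, replicated across a
 * 32-bit value: for 16-bit scalars bit 3 of 'reg' selects the high or low
 * half of the 32-bit element, which is then duplicated into both halves;
 * for 32-bit scalars the element is used directly. */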
39d5492a 3674static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3675{
39d5492a 3676 TCGv_i32 tmp;
9ee6e8bb 3677 if (size == 1) {
0fad6efc
PM
3678 tmp = neon_load_reg(reg & 7, reg >> 4);
3679 if (reg & 8) {
dd8fbd78 3680 gen_neon_dup_high16(tmp);
0fad6efc
PM
3681 } else {
3682 gen_neon_dup_low16(tmp);
dd8fbd78 3683 }
0fad6efc
PM
3684 } else {
3685 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3686 }
dd8fbd78 3687 return tmp;
9ee6e8bb
PB
3688}
3689
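/* The unzip/zip helpers operate on whole registers, so the register
 * numbers are passed as TCG constants rather than loading data here;
 * 'q' selects the quadword form. The doubleword 32-bit case has no
 * helper and is rejected, so the caller treats it as UNDEF. */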
02acedf9 3690static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3691{
39d5492a 3692 TCGv_i32 tmp, tmp2;
600b828c 3693 if (!q && size == 2) {
02acedf9
PM
3694 return 1;
3695 }
3696 tmp = tcg_const_i32(rd);
3697 tmp2 = tcg_const_i32(rm);
3698 if (q) {
3699 switch (size) {
3700 case 0:
02da0b2d 3701 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3702 break;
3703 case 1:
02da0b2d 3704 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3705 break;
3706 case 2:
02da0b2d 3707 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3708 break;
3709 default:
3710 abort();
3711 }
3712 } else {
3713 switch (size) {
3714 case 0:
02da0b2d 3715 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3716 break;
3717 case 1:
02da0b2d 3718 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3719 break;
3720 default:
3721 abort();
3722 }
3723 }
3724 tcg_temp_free_i32(tmp);
3725 tcg_temp_free_i32(tmp2);
3726 return 0;
19457615
FN
3727}
3728
d68a6f3a 3729static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3730{
39d5492a 3731 TCGv_i32 tmp, tmp2;
600b828c 3732 if (!q && size == 2) {
d68a6f3a
PM
3733 return 1;
3734 }
3735 tmp = tcg_const_i32(rd);
3736 tmp2 = tcg_const_i32(rm);
3737 if (q) {
3738 switch (size) {
3739 case 0:
02da0b2d 3740 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3741 break;
3742 case 1:
02da0b2d 3743 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3744 break;
3745 case 2:
02da0b2d 3746 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3747 break;
3748 default:
3749 abort();
3750 }
3751 } else {
3752 switch (size) {
3753 case 0:
02da0b2d 3754 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3755 break;
3756 case 1:
02da0b2d 3757 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3758 break;
3759 default:
3760 abort();
3761 }
3762 }
3763 tcg_temp_free_i32(tmp);
3764 tcg_temp_free_i32(tmp2);
3765 return 0;
19457615
FN
3766}
3767
39d5492a 3768static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3769{
39d5492a 3770 TCGv_i32 rd, tmp;
19457615 3771
7d1b0095
PM
3772 rd = tcg_temp_new_i32();
3773 tmp = tcg_temp_new_i32();
19457615
FN
3774
3775 tcg_gen_shli_i32(rd, t0, 8);
3776 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3777 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3778 tcg_gen_or_i32(rd, rd, tmp);
3779
3780 tcg_gen_shri_i32(t1, t1, 8);
3781 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3782 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3783 tcg_gen_or_i32(t1, t1, tmp);
3784 tcg_gen_mov_i32(t0, rd);
3785
7d1b0095
PM
3786 tcg_temp_free_i32(tmp);
3787 tcg_temp_free_i32(rd);
19457615
FN
3788}
3789
39d5492a 3790static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3791{
39d5492a 3792 TCGv_i32 rd, tmp;
19457615 3793
7d1b0095
PM
3794 rd = tcg_temp_new_i32();
3795 tmp = tcg_temp_new_i32();
19457615
FN
3796
3797 tcg_gen_shli_i32(rd, t0, 16);
3798 tcg_gen_andi_i32(tmp, t1, 0xffff);
3799 tcg_gen_or_i32(rd, rd, tmp);
3800 tcg_gen_shri_i32(t1, t1, 16);
3801 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3802 tcg_gen_or_i32(t1, t1, tmp);
3803 tcg_gen_mov_i32(t0, rd);
3804
7d1b0095
PM
3805 tcg_temp_free_i32(tmp);
3806 tcg_temp_free_i32(rd);
19457615
FN
3807}
3808
3809
9ee6e8bb
PB
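/* Per-op descriptions of the "load/store multiple structures" forms:
 * 'nregs' is the number of registers transferred, 'interleave' the
 * element interleave factor used to compute the address stride, and
 * 'spacing' the register step between the elements of one structure.
 * The table is indexed by the op field of the instruction; values
 * above 10 are rejected in disas_neon_ls_insn() below. */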
3810static struct {
3811 int nregs;
3812 int interleave;
3813 int spacing;
3814} neon_ls_element_type[11] = {
3815 {4, 4, 1},
3816 {4, 4, 2},
3817 {4, 1, 1},
3818 {4, 2, 1},
3819 {3, 3, 1},
3820 {3, 3, 2},
3821 {3, 1, 1},
3822 {1, 1, 1},
3823 {2, 2, 1},
3824 {2, 2, 2},
3825 {2, 1, 1}
3826};
3827
3828/* Translate a NEON load/store element instruction. Return nonzero if the
3829 instruction is invalid. */
0ecb72a5 3830static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3831{
3832 int rd, rn, rm;
3833 int op;
3834 int nregs;
3835 int interleave;
84496233 3836 int spacing;
9ee6e8bb
PB
3837 int stride;
3838 int size;
3839 int reg;
3840 int pass;
3841 int load;
3842 int shift;
9ee6e8bb 3843 int n;
39d5492a
PM
3844 TCGv_i32 addr;
3845 TCGv_i32 tmp;
3846 TCGv_i32 tmp2;
84496233 3847 TCGv_i64 tmp64;
9ee6e8bb 3848
5df8bac1 3849 if (!s->vfp_enabled)
9ee6e8bb
PB
3850 return 1;
3851 VFP_DREG_D(rd, insn);
3852 rn = (insn >> 16) & 0xf;
3853 rm = insn & 0xf;
3854 load = (insn & (1 << 21)) != 0;
3855 if ((insn & (1 << 23)) == 0) {
3856 /* Load store all elements. */
3857 op = (insn >> 8) & 0xf;
3858 size = (insn >> 6) & 3;
84496233 3859 if (op > 10)
9ee6e8bb 3860 return 1;
f2dd89d0
PM
3861 /* Catch UNDEF cases for bad values of align field */
3862 switch (op & 0xc) {
3863 case 4:
3864 if (((insn >> 5) & 1) == 1) {
3865 return 1;
3866 }
3867 break;
3868 case 8:
3869 if (((insn >> 4) & 3) == 3) {
3870 return 1;
3871 }
3872 break;
3873 default:
3874 break;
3875 }
9ee6e8bb
PB
3876 nregs = neon_ls_element_type[op].nregs;
3877 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3878 spacing = neon_ls_element_type[op].spacing;
3879 if (size == 3 && (interleave | spacing) != 1)
3880 return 1;
e318a60b 3881 addr = tcg_temp_new_i32();
dcc65026 3882 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3883 stride = (1 << size) * interleave;
3884 for (reg = 0; reg < nregs; reg++) {
3885 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3886 load_reg_var(s, addr, rn);
3887 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3888 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3889 load_reg_var(s, addr, rn);
3890 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3891 }
84496233 3892 if (size == 3) {
8ed1237d 3893 tmp64 = tcg_temp_new_i64();
84496233 3894 if (load) {
08307563 3895 gen_aa32_ld64(tmp64, addr, IS_USER(s));
84496233 3896 neon_store_reg64(tmp64, rd);
84496233 3897 } else {
84496233 3898 neon_load_reg64(tmp64, rd);
08307563 3899 gen_aa32_st64(tmp64, addr, IS_USER(s));
84496233 3900 }
8ed1237d 3901 tcg_temp_free_i64(tmp64);
84496233
JR
3902 tcg_gen_addi_i32(addr, addr, stride);
3903 } else {
3904 for (pass = 0; pass < 2; pass++) {
3905 if (size == 2) {
3906 if (load) {
58ab8e96 3907 tmp = tcg_temp_new_i32();
08307563 3908 gen_aa32_ld32u(tmp, addr, IS_USER(s));
84496233
JR
3909 neon_store_reg(rd, pass, tmp);
3910 } else {
3911 tmp = neon_load_reg(rd, pass);
08307563 3912 gen_aa32_st32(tmp, addr, IS_USER(s));
58ab8e96 3913 tcg_temp_free_i32(tmp);
84496233 3914 }
1b2b1e54 3915 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3916 } else if (size == 1) {
3917 if (load) {
58ab8e96 3918 tmp = tcg_temp_new_i32();
08307563 3919 gen_aa32_ld16u(tmp, addr, IS_USER(s));
84496233 3920 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 3921 tmp2 = tcg_temp_new_i32();
08307563 3922 gen_aa32_ld16u(tmp2, addr, IS_USER(s));
84496233 3923 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3924 tcg_gen_shli_i32(tmp2, tmp2, 16);
3925 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3926 tcg_temp_free_i32(tmp2);
84496233
JR
3927 neon_store_reg(rd, pass, tmp);
3928 } else {
3929 tmp = neon_load_reg(rd, pass);
7d1b0095 3930 tmp2 = tcg_temp_new_i32();
84496233 3931 tcg_gen_shri_i32(tmp2, tmp, 16);
08307563 3932 gen_aa32_st16(tmp, addr, IS_USER(s));
58ab8e96 3933 tcg_temp_free_i32(tmp);
84496233 3934 tcg_gen_addi_i32(addr, addr, stride);
08307563 3935 gen_aa32_st16(tmp2, addr, IS_USER(s));
58ab8e96 3936 tcg_temp_free_i32(tmp2);
1b2b1e54 3937 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3938 }
84496233
JR
3939 } else /* size == 0 */ {
3940 if (load) {
39d5492a 3941 TCGV_UNUSED_I32(tmp2);
84496233 3942 for (n = 0; n < 4; n++) {
58ab8e96 3943 tmp = tcg_temp_new_i32();
08307563 3944 gen_aa32_ld8u(tmp, addr, IS_USER(s));
84496233
JR
3945 tcg_gen_addi_i32(addr, addr, stride);
3946 if (n == 0) {
3947 tmp2 = tmp;
3948 } else {
41ba8341
PB
3949 tcg_gen_shli_i32(tmp, tmp, n * 8);
3950 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3951 tcg_temp_free_i32(tmp);
84496233 3952 }
9ee6e8bb 3953 }
84496233
JR
3954 neon_store_reg(rd, pass, tmp2);
3955 } else {
3956 tmp2 = neon_load_reg(rd, pass);
3957 for (n = 0; n < 4; n++) {
7d1b0095 3958 tmp = tcg_temp_new_i32();
84496233
JR
3959 if (n == 0) {
3960 tcg_gen_mov_i32(tmp, tmp2);
3961 } else {
3962 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3963 }
08307563 3964 gen_aa32_st8(tmp, addr, IS_USER(s));
58ab8e96 3965 tcg_temp_free_i32(tmp);
84496233
JR
3966 tcg_gen_addi_i32(addr, addr, stride);
3967 }
7d1b0095 3968 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3969 }
3970 }
3971 }
3972 }
84496233 3973 rd += spacing;
9ee6e8bb 3974 }
e318a60b 3975 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3976 stride = nregs * 8;
3977 } else {
3978 size = (insn >> 10) & 3;
3979 if (size == 3) {
3980 /* Load single element to all lanes. */
8e18cde3
PM
3981 int a = (insn >> 4) & 1;
3982 if (!load) {
9ee6e8bb 3983 return 1;
8e18cde3 3984 }
9ee6e8bb
PB
3985 size = (insn >> 6) & 3;
3986 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3987
3988 if (size == 3) {
3989 if (nregs != 4 || a == 0) {
9ee6e8bb 3990 return 1;
99c475ab 3991 }
8e18cde3
PM
3992 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3993 size = 2;
3994 }
3995 if (nregs == 1 && a == 1 && size == 0) {
3996 return 1;
3997 }
3998 if (nregs == 3 && a == 1) {
3999 return 1;
4000 }
e318a60b 4001 addr = tcg_temp_new_i32();
8e18cde3
PM
4002 load_reg_var(s, addr, rn);
4003 if (nregs == 1) {
4004 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
4005 tmp = gen_load_and_replicate(s, addr, size);
4006 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4008 if (insn & (1 << 5)) {
4009 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4010 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4011 }
4012 tcg_temp_free_i32(tmp);
4013 } else {
4014 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4015 stride = (insn & (1 << 5)) ? 2 : 1;
4016 for (reg = 0; reg < nregs; reg++) {
4017 tmp = gen_load_and_replicate(s, addr, size);
4018 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4019 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4020 tcg_temp_free_i32(tmp);
4021 tcg_gen_addi_i32(addr, addr, 1 << size);
4022 rd += stride;
4023 }
9ee6e8bb 4024 }
e318a60b 4025 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4026 stride = (1 << size) * nregs;
4027 } else {
4028 /* Single element. */
93262b16 4029 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4030 pass = (insn >> 7) & 1;
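 /* 'shift' is the bit offset of the selected element within the
  * 32-bit register pass; 'stride' is the register spacing between
  * the elements of one structure (1, or 2 when the wide-spacing
  * bit is set). */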
4031 switch (size) {
4032 case 0:
4033 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4034 stride = 1;
4035 break;
4036 case 1:
4037 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4038 stride = (insn & (1 << 5)) ? 2 : 1;
4039 break;
4040 case 2:
4041 shift = 0;
9ee6e8bb
PB
4042 stride = (insn & (1 << 6)) ? 2 : 1;
4043 break;
4044 default:
4045 abort();
4046 }
4047 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4048 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4049 switch (nregs) {
4050 case 1:
4051 if (((idx & (1 << size)) != 0) ||
4052 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4053 return 1;
4054 }
4055 break;
4056 case 3:
4057 if ((idx & 1) != 0) {
4058 return 1;
4059 }
4060 /* fall through */
4061 case 2:
4062 if (size == 2 && (idx & 2) != 0) {
4063 return 1;
4064 }
4065 break;
4066 case 4:
4067 if ((size == 2) && ((idx & 3) == 3)) {
4068 return 1;
4069 }
4070 break;
4071 default:
4072 abort();
4073 }
4074 if ((rd + stride * (nregs - 1)) > 31) {
4075 /* Attempts to write off the end of the register file
4076 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4077 * the neon_load_reg() would write off the end of the array.
4078 */
4079 return 1;
4080 }
e318a60b 4081 addr = tcg_temp_new_i32();
dcc65026 4082 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4083 for (reg = 0; reg < nregs; reg++) {
4084 if (load) {
58ab8e96 4085 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
4086 switch (size) {
4087 case 0:
08307563 4088 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4089 break;
4090 case 1:
08307563 4091 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4092 break;
4093 case 2:
08307563 4094 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 4095 break;
a50f5b91
PB
4096 default: /* Avoid compiler warnings. */
4097 abort();
9ee6e8bb
PB
4098 }
4099 if (size != 2) {
8f8e3aa4 4100 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4101 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4102 shift, size ? 16 : 8);
7d1b0095 4103 tcg_temp_free_i32(tmp2);
9ee6e8bb 4104 }
8f8e3aa4 4105 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4106 } else { /* Store */
8f8e3aa4
PB
4107 tmp = neon_load_reg(rd, pass);
4108 if (shift)
4109 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4110 switch (size) {
4111 case 0:
08307563 4112 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4113 break;
4114 case 1:
08307563 4115 gen_aa32_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4116 break;
4117 case 2:
08307563 4118 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4119 break;
99c475ab 4120 }
58ab8e96 4121 tcg_temp_free_i32(tmp);
99c475ab 4122 }
9ee6e8bb 4123 rd += stride;
1b2b1e54 4124 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4125 }
e318a60b 4126 tcg_temp_free_i32(addr);
9ee6e8bb 4127 stride = nregs * (1 << size);
99c475ab 4128 }
9ee6e8bb
PB
4129 }
4130 if (rm != 15) {
39d5492a 4131 TCGv_i32 base;
b26eefb6
PB
4132
4133 base = load_reg(s, rn);
9ee6e8bb 4134 if (rm == 13) {
b26eefb6 4135 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4136 } else {
39d5492a 4137 TCGv_i32 index;
b26eefb6
PB
4138 index = load_reg(s, rm);
4139 tcg_gen_add_i32(base, base, index);
7d1b0095 4140 tcg_temp_free_i32(index);
9ee6e8bb 4141 }
b26eefb6 4142 store_reg(s, rn, base);
9ee6e8bb
PB
4143 }
4144 return 0;
4145}
3b46e624 4146
8f8e3aa4 4147/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4148static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4149{
4150 tcg_gen_and_i32(t, t, c);
f669df27 4151 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4152 tcg_gen_or_i32(dest, t, f);
4153}
4154
39d5492a 4155static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4156{
4157 switch (size) {
4158 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4159 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4160 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4161 default: abort();
4162 }
4163}
4164
39d5492a 4165static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4166{
4167 switch (size) {
02da0b2d
PM
4168 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4169 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4170 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4171 default: abort();
4172 }
4173}
4174
39d5492a 4175static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4176{
4177 switch (size) {
02da0b2d
PM
4178 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4179 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4180 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4181 default: abort();
4182 }
4183}
4184
39d5492a 4185static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4186{
4187 switch (size) {
02da0b2d
PM
4188 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4189 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4190 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4191 default: abort();
4192 }
4193}
4194
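/* Shift each element of 'var' by 'shift' before narrowing: 'q' selects
 * the rounding forms of the shift, 'u' selects unsigned rather than
 * signed shifts. Only 16-bit and 32-bit element sizes are expected
 * here; other sizes abort. */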
39d5492a 4195static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4196 int q, int u)
4197{
4198 if (q) {
4199 if (u) {
4200 switch (size) {
4201 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4202 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4203 default: abort();
4204 }
4205 } else {
4206 switch (size) {
4207 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4208 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4209 default: abort();
4210 }
4211 }
4212 } else {
4213 if (u) {
4214 switch (size) {
b408a9b0
CL
4215 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4216 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4217 default: abort();
4218 }
4219 } else {
4220 switch (size) {
4221 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4222 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4223 default: abort();
4224 }
4225 }
4226 }
4227}
4228
39d5492a 4229static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4230{
4231 if (u) {
4232 switch (size) {
4233 case 0: gen_helper_neon_widen_u8(dest, src); break;
4234 case 1: gen_helper_neon_widen_u16(dest, src); break;
4235 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4236 default: abort();
4237 }
4238 } else {
4239 switch (size) {
4240 case 0: gen_helper_neon_widen_s8(dest, src); break;
4241 case 1: gen_helper_neon_widen_s16(dest, src); break;
4242 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4243 default: abort();
4244 }
4245 }
7d1b0095 4246 tcg_temp_free_i32(src);
ad69471c
PB
4247}
4248
4249static inline void gen_neon_addl(int size)
4250{
4251 switch (size) {
4252 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4253 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4254 case 2: tcg_gen_add_i64(CPU_V001); break;
4255 default: abort();
4256 }
4257}
4258
4259static inline void gen_neon_subl(int size)
4260{
4261 switch (size) {
4262 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4263 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4264 case 2: tcg_gen_sub_i64(CPU_V001); break;
4265 default: abort();
4266 }
4267}
4268
a7812ae4 4269static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4270{
4271 switch (size) {
4272 case 0: gen_helper_neon_negl_u16(var, var); break;
4273 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4274 case 2:
4275 tcg_gen_neg_i64(var, var);
4276 break;
ad69471c
PB
4277 default: abort();
4278 }
4279}
4280
a7812ae4 4281static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4282{
4283 switch (size) {
02da0b2d
PM
4284 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4285 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4286 default: abort();
4287 }
4288}
4289
39d5492a
PM
4290static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4291 int size, int u)
ad69471c 4292{
a7812ae4 4293 TCGv_i64 tmp;
ad69471c
PB
4294
4295 switch ((size << 1) | u) {
4296 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4297 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4298 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4299 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4300 case 4:
4301 tmp = gen_muls_i64_i32(a, b);
4302 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4303 tcg_temp_free_i64(tmp);
ad69471c
PB
4304 break;
4305 case 5:
4306 tmp = gen_mulu_i64_i32(a, b);
4307 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4308 tcg_temp_free_i64(tmp);
ad69471c
PB
4309 break;
4310 default: abort();
4311 }
c6067f04
CL
4312
4313 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4314 Don't forget to clean them now. */
4315 if (size < 2) {
7d1b0095
PM
4316 tcg_temp_free_i32(a);
4317 tcg_temp_free_i32(b);
c6067f04 4318 }
ad69471c
PB
4319}
4320
39d5492a
PM
4321static void gen_neon_narrow_op(int op, int u, int size,
4322 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4323{
4324 if (op) {
4325 if (u) {
4326 gen_neon_unarrow_sats(size, dest, src);
4327 } else {
4328 gen_neon_narrow(size, dest, src);
4329 }
4330 } else {
4331 if (u) {
4332 gen_neon_narrow_satu(size, dest, src);
4333 } else {
4334 gen_neon_narrow_sats(size, dest, src);
4335 }
4336 }
4337}
4338
62698be3
PM
4339/* Symbolic constants for op fields for Neon 3-register same-length.
4340 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4341 * table A7-9.
4342 */
4343#define NEON_3R_VHADD 0
4344#define NEON_3R_VQADD 1
4345#define NEON_3R_VRHADD 2
4346#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4347#define NEON_3R_VHSUB 4
4348#define NEON_3R_VQSUB 5
4349#define NEON_3R_VCGT 6
4350#define NEON_3R_VCGE 7
4351#define NEON_3R_VSHL 8
4352#define NEON_3R_VQSHL 9
4353#define NEON_3R_VRSHL 10
4354#define NEON_3R_VQRSHL 11
4355#define NEON_3R_VMAX 12
4356#define NEON_3R_VMIN 13
4357#define NEON_3R_VABD 14
4358#define NEON_3R_VABA 15
4359#define NEON_3R_VADD_VSUB 16
4360#define NEON_3R_VTST_VCEQ 17
4361#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4362#define NEON_3R_VMUL 19
4363#define NEON_3R_VPMAX 20
4364#define NEON_3R_VPMIN 21
4365#define NEON_3R_VQDMULH_VQRDMULH 22
4366#define NEON_3R_VPADD 23
da97f52c 4367#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4368#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4369#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4370#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4371#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4372#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4373#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4374
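/* Bitmap of permitted 'size' values for each 3-reg-same op: bit n set
 * means size n is valid, so unallocated ops (all bits clear) always
 * UNDEF. This is the same convention as neon_2rm_sizes below. */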
4375static const uint8_t neon_3r_sizes[] = {
4376 [NEON_3R_VHADD] = 0x7,
4377 [NEON_3R_VQADD] = 0xf,
4378 [NEON_3R_VRHADD] = 0x7,
4379 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4380 [NEON_3R_VHSUB] = 0x7,
4381 [NEON_3R_VQSUB] = 0xf,
4382 [NEON_3R_VCGT] = 0x7,
4383 [NEON_3R_VCGE] = 0x7,
4384 [NEON_3R_VSHL] = 0xf,
4385 [NEON_3R_VQSHL] = 0xf,
4386 [NEON_3R_VRSHL] = 0xf,
4387 [NEON_3R_VQRSHL] = 0xf,
4388 [NEON_3R_VMAX] = 0x7,
4389 [NEON_3R_VMIN] = 0x7,
4390 [NEON_3R_VABD] = 0x7,
4391 [NEON_3R_VABA] = 0x7,
4392 [NEON_3R_VADD_VSUB] = 0xf,
4393 [NEON_3R_VTST_VCEQ] = 0x7,
4394 [NEON_3R_VML] = 0x7,
4395 [NEON_3R_VMUL] = 0x7,
4396 [NEON_3R_VPMAX] = 0x7,
4397 [NEON_3R_VPMIN] = 0x7,
4398 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4399 [NEON_3R_VPADD] = 0x7,
da97f52c 4400 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4401 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4402 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4403 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4404 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4405 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4406 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4407};
4408
600b828c
PM
4409/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4410 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4411 * table A7-13.
4412 */
4413#define NEON_2RM_VREV64 0
4414#define NEON_2RM_VREV32 1
4415#define NEON_2RM_VREV16 2
4416#define NEON_2RM_VPADDL 4
4417#define NEON_2RM_VPADDL_U 5
4418#define NEON_2RM_VCLS 8
4419#define NEON_2RM_VCLZ 9
4420#define NEON_2RM_VCNT 10
4421#define NEON_2RM_VMVN 11
4422#define NEON_2RM_VPADAL 12
4423#define NEON_2RM_VPADAL_U 13
4424#define NEON_2RM_VQABS 14
4425#define NEON_2RM_VQNEG 15
4426#define NEON_2RM_VCGT0 16
4427#define NEON_2RM_VCGE0 17
4428#define NEON_2RM_VCEQ0 18
4429#define NEON_2RM_VCLE0 19
4430#define NEON_2RM_VCLT0 20
4431#define NEON_2RM_VABS 22
4432#define NEON_2RM_VNEG 23
4433#define NEON_2RM_VCGT0_F 24
4434#define NEON_2RM_VCGE0_F 25
4435#define NEON_2RM_VCEQ0_F 26
4436#define NEON_2RM_VCLE0_F 27
4437#define NEON_2RM_VCLT0_F 28
4438#define NEON_2RM_VABS_F 30
4439#define NEON_2RM_VNEG_F 31
4440#define NEON_2RM_VSWP 32
4441#define NEON_2RM_VTRN 33
4442#define NEON_2RM_VUZP 34
4443#define NEON_2RM_VZIP 35
4444#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4445#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4446#define NEON_2RM_VSHLL 38
4447#define NEON_2RM_VCVT_F16_F32 44
4448#define NEON_2RM_VCVT_F32_F16 46
4449#define NEON_2RM_VRECPE 56
4450#define NEON_2RM_VRSQRTE 57
4451#define NEON_2RM_VRECPE_F 58
4452#define NEON_2RM_VRSQRTE_F 59
4453#define NEON_2RM_VCVT_FS 60
4454#define NEON_2RM_VCVT_FU 61
4455#define NEON_2RM_VCVT_SF 62
4456#define NEON_2RM_VCVT_UF 63
4457
4458static int neon_2rm_is_float_op(int op)
4459{
4460 /* Return true if this neon 2reg-misc op is float-to-float */
4461 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4462 op >= NEON_2RM_VRECPE_F);
4463}
4464
4465/* Each entry in this array has bit n set if the insn allows
4466 * size value n (otherwise it will UNDEF). Since unallocated
4467 * op values will have no bits set they always UNDEF.
4468 */
4469static const uint8_t neon_2rm_sizes[] = {
4470 [NEON_2RM_VREV64] = 0x7,
4471 [NEON_2RM_VREV32] = 0x3,
4472 [NEON_2RM_VREV16] = 0x1,
4473 [NEON_2RM_VPADDL] = 0x7,
4474 [NEON_2RM_VPADDL_U] = 0x7,
4475 [NEON_2RM_VCLS] = 0x7,
4476 [NEON_2RM_VCLZ] = 0x7,
4477 [NEON_2RM_VCNT] = 0x1,
4478 [NEON_2RM_VMVN] = 0x1,
4479 [NEON_2RM_VPADAL] = 0x7,
4480 [NEON_2RM_VPADAL_U] = 0x7,
4481 [NEON_2RM_VQABS] = 0x7,
4482 [NEON_2RM_VQNEG] = 0x7,
4483 [NEON_2RM_VCGT0] = 0x7,
4484 [NEON_2RM_VCGE0] = 0x7,
4485 [NEON_2RM_VCEQ0] = 0x7,
4486 [NEON_2RM_VCLE0] = 0x7,
4487 [NEON_2RM_VCLT0] = 0x7,
4488 [NEON_2RM_VABS] = 0x7,
4489 [NEON_2RM_VNEG] = 0x7,
4490 [NEON_2RM_VCGT0_F] = 0x4,
4491 [NEON_2RM_VCGE0_F] = 0x4,
4492 [NEON_2RM_VCEQ0_F] = 0x4,
4493 [NEON_2RM_VCLE0_F] = 0x4,
4494 [NEON_2RM_VCLT0_F] = 0x4,
4495 [NEON_2RM_VABS_F] = 0x4,
4496 [NEON_2RM_VNEG_F] = 0x4,
4497 [NEON_2RM_VSWP] = 0x1,
4498 [NEON_2RM_VTRN] = 0x7,
4499 [NEON_2RM_VUZP] = 0x7,
4500 [NEON_2RM_VZIP] = 0x7,
4501 [NEON_2RM_VMOVN] = 0x7,
4502 [NEON_2RM_VQMOVN] = 0x7,
4503 [NEON_2RM_VSHLL] = 0x7,
4504 [NEON_2RM_VCVT_F16_F32] = 0x2,
4505 [NEON_2RM_VCVT_F32_F16] = 0x2,
4506 [NEON_2RM_VRECPE] = 0x4,
4507 [NEON_2RM_VRSQRTE] = 0x4,
4508 [NEON_2RM_VRECPE_F] = 0x4,
4509 [NEON_2RM_VRSQRTE_F] = 0x4,
4510 [NEON_2RM_VCVT_FS] = 0x4,
4511 [NEON_2RM_VCVT_FU] = 0x4,
4512 [NEON_2RM_VCVT_SF] = 0x4,
4513 [NEON_2RM_VCVT_UF] = 0x4,
4514};
4515
9ee6e8bb
PB
4516/* Translate a NEON data processing instruction. Return nonzero if the
4517 instruction is invalid.
ad69471c
PB
4518 We process data in a mixture of 32-bit and 64-bit chunks.
4519 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4520
0ecb72a5 4521static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4522{
4523 int op;
4524 int q;
4525 int rd, rn, rm;
4526 int size;
4527 int shift;
4528 int pass;
4529 int count;
4530 int pairwise;
4531 int u;
ca9a32e4 4532 uint32_t imm, mask;
39d5492a 4533 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4534 TCGv_i64 tmp64;
9ee6e8bb 4535
5df8bac1 4536 if (!s->vfp_enabled)
9ee6e8bb
PB
4537 return 1;
4538 q = (insn & (1 << 6)) != 0;
4539 u = (insn >> 24) & 1;
4540 VFP_DREG_D(rd, insn);
4541 VFP_DREG_N(rn, insn);
4542 VFP_DREG_M(rm, insn);
4543 size = (insn >> 20) & 3;
4544 if ((insn & (1 << 23)) == 0) {
4545 /* Three register same length. */
4546 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4547 /* Catch invalid op and bad size combinations: UNDEF */
4548 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4549 return 1;
4550 }
25f84f79
PM
4551 /* All insns of this form UNDEF for either this condition or the
4552 * superset of cases "Q==1"; we catch the latter later.
4553 */
4554 if (q && ((rd | rn | rm) & 1)) {
4555 return 1;
4556 }
62698be3
PM
4557 if (size == 3 && op != NEON_3R_LOGIC) {
4558 /* 64-bit element instructions. */
9ee6e8bb 4559 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4560 neon_load_reg64(cpu_V0, rn + pass);
4561 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4562 switch (op) {
62698be3 4563 case NEON_3R_VQADD:
9ee6e8bb 4564 if (u) {
02da0b2d
PM
4565 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4566 cpu_V0, cpu_V1);
2c0262af 4567 } else {
02da0b2d
PM
4568 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4569 cpu_V0, cpu_V1);
2c0262af 4570 }
9ee6e8bb 4571 break;
62698be3 4572 case NEON_3R_VQSUB:
9ee6e8bb 4573 if (u) {
02da0b2d
PM
4574 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4575 cpu_V0, cpu_V1);
ad69471c 4576 } else {
02da0b2d
PM
4577 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4578 cpu_V0, cpu_V1);
ad69471c
PB
4579 }
4580 break;
62698be3 4581 case NEON_3R_VSHL:
ad69471c
PB
4582 if (u) {
4583 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4584 } else {
4585 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4586 }
4587 break;
62698be3 4588 case NEON_3R_VQSHL:
ad69471c 4589 if (u) {
02da0b2d
PM
4590 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4591 cpu_V1, cpu_V0);
ad69471c 4592 } else {
02da0b2d
PM
4593 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4594 cpu_V1, cpu_V0);
ad69471c
PB
4595 }
4596 break;
62698be3 4597 case NEON_3R_VRSHL:
ad69471c
PB
4598 if (u) {
4599 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4600 } else {
ad69471c
PB
4601 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4602 }
4603 break;
62698be3 4604 case NEON_3R_VQRSHL:
ad69471c 4605 if (u) {
02da0b2d
PM
4606 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4607 cpu_V1, cpu_V0);
ad69471c 4608 } else {
02da0b2d
PM
4609 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4610 cpu_V1, cpu_V0);
1e8d4eec 4611 }
9ee6e8bb 4612 break;
62698be3 4613 case NEON_3R_VADD_VSUB:
9ee6e8bb 4614 if (u) {
ad69471c 4615 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4616 } else {
ad69471c 4617 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4618 }
4619 break;
4620 default:
4621 abort();
2c0262af 4622 }
ad69471c 4623 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4624 }
9ee6e8bb 4625 return 0;
2c0262af 4626 }
25f84f79 4627 pairwise = 0;
9ee6e8bb 4628 switch (op) {
62698be3
PM
4629 case NEON_3R_VSHL:
4630 case NEON_3R_VQSHL:
4631 case NEON_3R_VRSHL:
4632 case NEON_3R_VQRSHL:
9ee6e8bb 4633 {
ad69471c
PB
4634 int rtmp;
4635 /* Shift instruction operands are reversed. */
4636 rtmp = rn;
9ee6e8bb 4637 rn = rm;
ad69471c 4638 rm = rtmp;
9ee6e8bb 4639 }
2c0262af 4640 break;
25f84f79
PM
4641 case NEON_3R_VPADD:
4642 if (u) {
4643 return 1;
4644 }
4645 /* Fall through */
62698be3
PM
4646 case NEON_3R_VPMAX:
4647 case NEON_3R_VPMIN:
9ee6e8bb 4648 pairwise = 1;
2c0262af 4649 break;
25f84f79
PM
4650 case NEON_3R_FLOAT_ARITH:
4651 pairwise = (u && size < 2); /* if VPADD (float) */
4652 break;
4653 case NEON_3R_FLOAT_MINMAX:
4654 pairwise = u; /* if VPMIN/VPMAX (float) */
4655 break;
4656 case NEON_3R_FLOAT_CMP:
4657 if (!u && size) {
4658 /* no encoding for U=0 C=1x */
4659 return 1;
4660 }
4661 break;
4662 case NEON_3R_FLOAT_ACMP:
4663 if (!u) {
4664 return 1;
4665 }
4666 break;
4667 case NEON_3R_VRECPS_VRSQRTS:
4668 if (u) {
4669 return 1;
4670 }
2c0262af 4671 break;
25f84f79
PM
4672 case NEON_3R_VMUL:
4673 if (u && (size != 0)) {
4674 /* UNDEF on invalid size for polynomial subcase */
4675 return 1;
4676 }
2c0262af 4677 break;
da97f52c
PM
4678 case NEON_3R_VFM:
4679 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4680 return 1;
4681 }
4682 break;
9ee6e8bb 4683 default:
2c0262af 4684 break;
9ee6e8bb 4685 }
dd8fbd78 4686
25f84f79
PM
4687 if (pairwise && q) {
4688 /* All the pairwise insns UNDEF if Q is set */
4689 return 1;
4690 }
4691
9ee6e8bb
PB
4692 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4693
4694 if (pairwise) {
4695 /* Pairwise. */
a5a14945
JR
4696 if (pass < 1) {
4697 tmp = neon_load_reg(rn, 0);
4698 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4699 } else {
a5a14945
JR
4700 tmp = neon_load_reg(rm, 0);
4701 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4702 }
4703 } else {
4704 /* Elementwise. */
dd8fbd78
FN
4705 tmp = neon_load_reg(rn, pass);
4706 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4707 }
4708 switch (op) {
62698be3 4709 case NEON_3R_VHADD:
9ee6e8bb
PB
4710 GEN_NEON_INTEGER_OP(hadd);
4711 break;
62698be3 4712 case NEON_3R_VQADD:
02da0b2d 4713 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4714 break;
62698be3 4715 case NEON_3R_VRHADD:
9ee6e8bb 4716 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4717 break;
62698be3 4718 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4719 switch ((u << 2) | size) {
4720 case 0: /* VAND */
dd8fbd78 4721 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4722 break;
4723 case 1: /* BIC */
f669df27 4724 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4725 break;
4726 case 2: /* VORR */
dd8fbd78 4727 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4728 break;
4729 case 3: /* VORN */
f669df27 4730 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4731 break;
4732 case 4: /* VEOR */
dd8fbd78 4733 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4734 break;
4735 case 5: /* VBSL */
dd8fbd78
FN
4736 tmp3 = neon_load_reg(rd, pass);
4737 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4738 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4739 break;
4740 case 6: /* VBIT */
dd8fbd78
FN
4741 tmp3 = neon_load_reg(rd, pass);
4742 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4743 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4744 break;
4745 case 7: /* VBIF */
dd8fbd78
FN
4746 tmp3 = neon_load_reg(rd, pass);
4747 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4748 tcg_temp_free_i32(tmp3);
9ee6e8bb 4749 break;
2c0262af
FB
4750 }
4751 break;
62698be3 4752 case NEON_3R_VHSUB:
9ee6e8bb
PB
4753 GEN_NEON_INTEGER_OP(hsub);
4754 break;
62698be3 4755 case NEON_3R_VQSUB:
02da0b2d 4756 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4757 break;
62698be3 4758 case NEON_3R_VCGT:
9ee6e8bb
PB
4759 GEN_NEON_INTEGER_OP(cgt);
4760 break;
62698be3 4761 case NEON_3R_VCGE:
9ee6e8bb
PB
4762 GEN_NEON_INTEGER_OP(cge);
4763 break;
62698be3 4764 case NEON_3R_VSHL:
ad69471c 4765 GEN_NEON_INTEGER_OP(shl);
2c0262af 4766 break;
62698be3 4767 case NEON_3R_VQSHL:
02da0b2d 4768 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4769 break;
62698be3 4770 case NEON_3R_VRSHL:
ad69471c 4771 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4772 break;
62698be3 4773 case NEON_3R_VQRSHL:
02da0b2d 4774 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4775 break;
62698be3 4776 case NEON_3R_VMAX:
9ee6e8bb
PB
4777 GEN_NEON_INTEGER_OP(max);
4778 break;
62698be3 4779 case NEON_3R_VMIN:
9ee6e8bb
PB
4780 GEN_NEON_INTEGER_OP(min);
4781 break;
62698be3 4782 case NEON_3R_VABD:
9ee6e8bb
PB
4783 GEN_NEON_INTEGER_OP(abd);
4784 break;
62698be3 4785 case NEON_3R_VABA:
9ee6e8bb 4786 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4787 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4788 tmp2 = neon_load_reg(rd, pass);
4789 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4790 break;
62698be3 4791 case NEON_3R_VADD_VSUB:
9ee6e8bb 4792 if (!u) { /* VADD */
62698be3 4793 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4794 } else { /* VSUB */
4795 switch (size) {
dd8fbd78
FN
4796 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4797 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4798 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4799 default: abort();
9ee6e8bb
PB
4800 }
4801 }
4802 break;
62698be3 4803 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4804 if (!u) { /* VTST */
4805 switch (size) {
dd8fbd78
FN
4806 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4807 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4808 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4809 default: abort();
9ee6e8bb
PB
4810 }
4811 } else { /* VCEQ */
4812 switch (size) {
dd8fbd78
FN
4813 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4814 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4815 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4816 default: abort();
9ee6e8bb
PB
4817 }
4818 }
4819 break;
62698be3 4820 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4821 switch (size) {
dd8fbd78
FN
4822 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4823 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4824 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4825 default: abort();
9ee6e8bb 4826 }
7d1b0095 4827 tcg_temp_free_i32(tmp2);
dd8fbd78 4828 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4829 if (u) { /* VMLS */
dd8fbd78 4830 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4831 } else { /* VMLA */
dd8fbd78 4832 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4833 }
4834 break;
62698be3 4835 case NEON_3R_VMUL:
9ee6e8bb 4836 if (u) { /* polynomial */
dd8fbd78 4837 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4838 } else { /* Integer */
4839 switch (size) {
dd8fbd78
FN
4840 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4841 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4842 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4843 default: abort();
9ee6e8bb
PB
4844 }
4845 }
4846 break;
62698be3 4847 case NEON_3R_VPMAX:
9ee6e8bb
PB
4848 GEN_NEON_INTEGER_OP(pmax);
4849 break;
62698be3 4850 case NEON_3R_VPMIN:
9ee6e8bb
PB
4851 GEN_NEON_INTEGER_OP(pmin);
4852 break;
62698be3 4853 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4854 if (!u) { /* VQDMULH */
4855 switch (size) {
02da0b2d
PM
4856 case 1:
4857 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4858 break;
4859 case 2:
4860 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4861 break;
62698be3 4862 default: abort();
9ee6e8bb 4863 }
62698be3 4864 } else { /* VQRDMULH */
9ee6e8bb 4865 switch (size) {
02da0b2d
PM
4866 case 1:
4867 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4868 break;
4869 case 2:
4870 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4871 break;
62698be3 4872 default: abort();
9ee6e8bb
PB
4873 }
4874 }
4875 break;
62698be3 4876 case NEON_3R_VPADD:
9ee6e8bb 4877 switch (size) {
dd8fbd78
FN
4878 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4879 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4880 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4881 default: abort();
9ee6e8bb
PB
4882 }
4883 break;
62698be3 4884 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4885 {
4886 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4887 switch ((u << 2) | size) {
4888 case 0: /* VADD */
aa47cfdd
PM
4889 case 4: /* VPADD */
4890 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4891 break;
4892 case 2: /* VSUB */
aa47cfdd 4893 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4894 break;
4895 case 6: /* VABD */
aa47cfdd 4896 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4897 break;
4898 default:
62698be3 4899 abort();
9ee6e8bb 4900 }
aa47cfdd 4901 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4902 break;
aa47cfdd 4903 }
62698be3 4904 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4905 {
4906 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4907 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4908 if (!u) {
7d1b0095 4909 tcg_temp_free_i32(tmp2);
dd8fbd78 4910 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4911 if (size == 0) {
aa47cfdd 4912 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4913 } else {
aa47cfdd 4914 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4915 }
4916 }
aa47cfdd 4917 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4918 break;
aa47cfdd 4919 }
62698be3 4920 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4921 {
4922 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4923 if (!u) {
aa47cfdd 4924 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4925 } else {
aa47cfdd
PM
4926 if (size == 0) {
4927 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4928 } else {
4929 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4930 }
b5ff1b31 4931 }
aa47cfdd 4932 tcg_temp_free_ptr(fpstatus);
2c0262af 4933 break;
aa47cfdd 4934 }
62698be3 4935 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4936 {
4937 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4938 if (size == 0) {
4939 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4940 } else {
4941 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4942 }
4943 tcg_temp_free_ptr(fpstatus);
2c0262af 4944 break;
aa47cfdd 4945 }
62698be3 4946 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4947 {
4948 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4949 if (size == 0) {
4950 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4951 } else {
4952 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4953 }
4954 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4955 break;
aa47cfdd 4956 }
62698be3 4957 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4958 if (size == 0)
dd8fbd78 4959 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4960 else
dd8fbd78 4961 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4962 break;
da97f52c
PM
4963 case NEON_3R_VFM:
4964 {
4965 /* VFMA, VFMS: fused multiply-add */
4966 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4967 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4968 if (size) {
4969 /* VFMS */
4970 gen_helper_vfp_negs(tmp, tmp);
4971 }
4972 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4973 tcg_temp_free_i32(tmp3);
4974 tcg_temp_free_ptr(fpstatus);
4975 break;
4976 }
9ee6e8bb
PB
4977 default:
4978 abort();
2c0262af 4979 }
7d1b0095 4980 tcg_temp_free_i32(tmp2);
dd8fbd78 4981
9ee6e8bb
PB
4982 /* Save the result. For elementwise operations we can put it
4983 straight into the destination register. For pairwise operations
4984 we have to be careful to avoid clobbering the source operands. */
4985 if (pairwise && rd == rm) {
dd8fbd78 4986 neon_store_scratch(pass, tmp);
9ee6e8bb 4987 } else {
dd8fbd78 4988 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4989 }
4990
4991 } /* for pass */
4992 if (pairwise && rd == rm) {
4993 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4994 tmp = neon_load_scratch(pass);
4995 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4996 }
4997 }
ad69471c 4998 /* End of 3 register same size operations. */
9ee6e8bb
PB
4999 } else if (insn & (1 << 4)) {
5000 if ((insn & 0x00380080) != 0) {
5001 /* Two registers and shift. */
5002 op = (insn >> 8) & 0xf;
5003 if (insn & (1 << 7)) {
cc13115b
PM
5004 /* 64-bit shift. */
5005 if (op > 7) {
5006 return 1;
5007 }
9ee6e8bb
PB
5008 size = 3;
5009 } else {
5010 size = 2;
5011 while ((insn & (1 << (size + 19))) == 0)
5012 size--;
5013 }
5014 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5015 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5016 by immediate using the variable shift operations. */
5017 if (op < 8) {
5018 /* Shift by immediate:
5019 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5020 if (q && ((rd | rm) & 1)) {
5021 return 1;
5022 }
5023 if (!u && (op == 4 || op == 6)) {
5024 return 1;
5025 }
9ee6e8bb
PB
5026 /* Right shifts are encoded as N - shift, where N is the
5027 element size in bits. */
5028 if (op <= 4)
5029 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5030 if (size == 3) {
5031 count = q + 1;
5032 } else {
5033 count = q ? 4: 2;
5034 }
5035 switch (size) {
5036 case 0:
5037 imm = (uint8_t) shift;
5038 imm |= imm << 8;
5039 imm |= imm << 16;
5040 break;
5041 case 1:
5042 imm = (uint16_t) shift;
5043 imm |= imm << 16;
5044 break;
5045 case 2:
5046 case 3:
5047 imm = shift;
5048 break;
5049 default:
5050 abort();
5051 }
5052
5053 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5054 if (size == 3) {
5055 neon_load_reg64(cpu_V0, rm + pass);
5056 tcg_gen_movi_i64(cpu_V1, imm);
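 /* Right shifts (ops 0..4) had their shift counts made negative
  * above, so the variable-shift helpers, which shift right for
  * negative counts, implement VSHR/VSRA/VRSHR/VRSRA and VSRI here. */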
5057 switch (op) {
5058 case 0: /* VSHR */
5059 case 1: /* VSRA */
5060 if (u)
5061 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5062 else
ad69471c 5063 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5064 break;
ad69471c
PB
5065 case 2: /* VRSHR */
5066 case 3: /* VRSRA */
5067 if (u)
5068 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5069 else
ad69471c 5070 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5071 break;
ad69471c 5072 case 4: /* VSRI */
ad69471c
PB
5073 case 5: /* VSHL, VSLI */
5074 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5075 break;
0322b26e 5076 case 6: /* VQSHLU */
02da0b2d
PM
5077 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5078 cpu_V0, cpu_V1);
ad69471c 5079 break;
0322b26e
PM
5080 case 7: /* VQSHL */
5081 if (u) {
02da0b2d 5082 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5083 cpu_V0, cpu_V1);
5084 } else {
02da0b2d 5085 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5086 cpu_V0, cpu_V1);
5087 }
9ee6e8bb 5088 break;
9ee6e8bb 5089 }
ad69471c
PB
5090 if (op == 1 || op == 3) {
5091 /* Accumulate. */
5371cb81 5092 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5093 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5094 } else if (op == 4 || (op == 5 && u)) {
5095 /* Insert */
923e6509
CL
5096 neon_load_reg64(cpu_V1, rd + pass);
5097 uint64_t mask;
5098 if (shift < -63 || shift > 63) {
5099 mask = 0;
5100 } else {
5101 if (op == 4) {
5102 mask = 0xffffffffffffffffull >> -shift;
5103 } else {
5104 mask = 0xffffffffffffffffull << shift;
5105 }
5106 }
5107 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5108 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5109 }
5110 neon_store_reg64(cpu_V0, rd + pass);
5111 } else { /* size < 3 */
5112 /* Operands in T0 and T1. */
dd8fbd78 5113 tmp = neon_load_reg(rm, pass);
7d1b0095 5114 tmp2 = tcg_temp_new_i32();
dd8fbd78 5115 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5116 switch (op) {
5117 case 0: /* VSHR */
5118 case 1: /* VSRA */
5119 GEN_NEON_INTEGER_OP(shl);
5120 break;
5121 case 2: /* VRSHR */
5122 case 3: /* VRSRA */
5123 GEN_NEON_INTEGER_OP(rshl);
5124 break;
5125 case 4: /* VSRI */
ad69471c
PB
5126 case 5: /* VSHL, VSLI */
5127 switch (size) {
dd8fbd78
FN
5128 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5129 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5130 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5131 default: abort();
ad69471c
PB
5132 }
5133 break;
0322b26e 5134 case 6: /* VQSHLU */
ad69471c 5135 switch (size) {
0322b26e 5136 case 0:
02da0b2d
PM
5137 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5138 tmp, tmp2);
0322b26e
PM
5139 break;
5140 case 1:
02da0b2d
PM
5141 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5142 tmp, tmp2);
0322b26e
PM
5143 break;
5144 case 2:
02da0b2d
PM
5145 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5146 tmp, tmp2);
0322b26e
PM
5147 break;
5148 default:
cc13115b 5149 abort();
ad69471c
PB
5150 }
5151 break;
0322b26e 5152 case 7: /* VQSHL */
02da0b2d 5153 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5154 break;
ad69471c 5155 }
7d1b0095 5156 tcg_temp_free_i32(tmp2);
ad69471c
PB
5157
5158 if (op == 1 || op == 3) {
5159 /* Accumulate. */
dd8fbd78 5160 tmp2 = neon_load_reg(rd, pass);
5371cb81 5161 gen_neon_add(size, tmp, tmp2);
7d1b0095 5162 tcg_temp_free_i32(tmp2);
ad69471c
PB
5163 } else if (op == 4 || (op == 5 && u)) {
5164 /* Insert */
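 /* Compute the mask of result bits taken from the shifted value,
  * e.g. VSRI.8 with a right shift of 3 reaches this point with
  * shift == -3, giving mask = 0xff >> 3 = 0x1f: the low five bits
  * of each byte come from the shifted source, the rest are kept
  * from the destination. */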
5165 switch (size) {
5166 case 0:
5167 if (op == 4)
ca9a32e4 5168 mask = 0xff >> -shift;
ad69471c 5169 else
ca9a32e4
JR
5170 mask = (uint8_t)(0xff << shift);
5171 mask |= mask << 8;
5172 mask |= mask << 16;
ad69471c
PB
5173 break;
5174 case 1:
5175 if (op == 4)
ca9a32e4 5176 mask = 0xffff >> -shift;
ad69471c 5177 else
ca9a32e4
JR
5178 mask = (uint16_t)(0xffff << shift);
5179 mask |= mask << 16;
ad69471c
PB
5180 break;
5181 case 2:
ca9a32e4
JR
5182 if (shift < -31 || shift > 31) {
5183 mask = 0;
5184 } else {
5185 if (op == 4)
5186 mask = 0xffffffffu >> -shift;
5187 else
5188 mask = 0xffffffffu << shift;
5189 }
ad69471c
PB
5190 break;
5191 default:
5192 abort();
5193 }
dd8fbd78 5194 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5195 tcg_gen_andi_i32(tmp, tmp, mask);
5196 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5197 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5198 tcg_temp_free_i32(tmp2);
ad69471c 5199 }
dd8fbd78 5200 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5201 }
5202 } /* for pass */
5203 } else if (op < 10) {
ad69471c 5204 /* Shift by immediate and narrow:
9ee6e8bb 5205 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5206 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5207 if (rm & 1) {
5208 return 1;
5209 }
9ee6e8bb
PB
5210 shift = shift - (1 << (size + 3));
5211 size++;
92cdfaeb 5212 if (size == 3) {
a7812ae4 5213 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5214 neon_load_reg64(cpu_V0, rm);
5215 neon_load_reg64(cpu_V1, rm + 1);
5216 for (pass = 0; pass < 2; pass++) {
5217 TCGv_i64 in;
5218 if (pass == 0) {
5219 in = cpu_V0;
5220 } else {
5221 in = cpu_V1;
5222 }
ad69471c 5223 if (q) {
0b36f4cd 5224 if (input_unsigned) {
92cdfaeb 5225 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5226 } else {
92cdfaeb 5227 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5228 }
ad69471c 5229 } else {
0b36f4cd 5230 if (input_unsigned) {
92cdfaeb 5231 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5232 } else {
92cdfaeb 5233 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5234 }
ad69471c 5235 }
7d1b0095 5236 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5237 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5238 neon_store_reg(rd, pass, tmp);
5239 } /* for pass */
5240 tcg_temp_free_i64(tmp64);
5241 } else {
5242 if (size == 1) {
5243 imm = (uint16_t)shift;
5244 imm |= imm << 16;
2c0262af 5245 } else {
92cdfaeb
PM
5246 /* size == 2 */
5247 imm = (uint32_t)shift;
5248 }
5249 tmp2 = tcg_const_i32(imm);
5250 tmp4 = neon_load_reg(rm + 1, 0);
5251 tmp5 = neon_load_reg(rm + 1, 1);
5252 for (pass = 0; pass < 2; pass++) {
5253 if (pass == 0) {
5254 tmp = neon_load_reg(rm, 0);
5255 } else {
5256 tmp = tmp4;
5257 }
0b36f4cd
CL
5258 gen_neon_shift_narrow(size, tmp, tmp2, q,
5259 input_unsigned);
92cdfaeb
PM
5260 if (pass == 0) {
5261 tmp3 = neon_load_reg(rm, 1);
5262 } else {
5263 tmp3 = tmp5;
5264 }
0b36f4cd
CL
5265 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5266 input_unsigned);
36aa55dc 5267 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5268 tcg_temp_free_i32(tmp);
5269 tcg_temp_free_i32(tmp3);
5270 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5271 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5272 neon_store_reg(rd, pass, tmp);
5273 } /* for pass */
c6067f04 5274 tcg_temp_free_i32(tmp2);
b75263d6 5275 }
9ee6e8bb 5276 } else if (op == 10) {
cc13115b
PM
5277 /* VSHLL, VMOVL */
5278 if (q || (rd & 1)) {
9ee6e8bb 5279 return 1;
cc13115b 5280 }
ad69471c
PB
5281 tmp = neon_load_reg(rm, 0);
5282 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5283 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5284 if (pass == 1)
5285 tmp = tmp2;
5286
5287 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5288
9ee6e8bb
PB
5289 if (shift != 0) {
5290 /* The shift is less than the width of the source
ad69471c
PB
5291 type, so we can just shift the whole register. */
5292 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5293 /* Widen the result of the shift: we need to clear
5294 * the potential overflow bits resulting from the
5295 * top bits of one narrow input appearing as the
5296 * bottom bits of the neighbouring narrow input
5297 * to its left. */
ad69471c
PB
5298 if (size < 2 || !u) {
5299 uint64_t imm64;
5300 if (size == 0) {
5301 imm = (0xffu >> (8 - shift));
5302 imm |= imm << 16;
acdf01ef 5303 } else if (size == 1) {
ad69471c 5304 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5305 } else {
5306 /* size == 2 */
5307 imm = 0xffffffff >> (32 - shift);
5308 }
5309 if (size < 2) {
5310 imm64 = imm | (((uint64_t)imm) << 32);
5311 } else {
5312 imm64 = imm;
9ee6e8bb 5313 }
acdf01ef 5314 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5315 }
5316 }
ad69471c 5317 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5318 }
f73534a5 5319 } else if (op >= 14) {
9ee6e8bb 5320 /* VCVT fixed-point. */
cc13115b
PM
5321 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5322 return 1;
5323 }
f73534a5
PM
5324 /* We have already masked out the must-be-1 top bit of imm6,
5325 * hence this 32-shift where the ARM ARM has 64-imm6.
5326 */
5327 shift = 32 - shift;
9ee6e8bb 5328 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5329 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5330 if (!(op & 1)) {
9ee6e8bb 5331 if (u)
5500b06c 5332 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5333 else
5500b06c 5334 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5335 } else {
5336 if (u)
5500b06c 5337 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5338 else
5500b06c 5339 gen_vfp_tosl(0, shift, 1);
2c0262af 5340 }
4373f3ce 5341 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5342 }
5343 } else {
9ee6e8bb
PB
5344 return 1;
5345 }
5346 } else { /* (insn & 0x00380080) == 0 */
5347 int invert;
7d80fee5
PM
5348 if (q && (rd & 1)) {
5349 return 1;
5350 }
9ee6e8bb
PB
5351
5352 op = (insn >> 8) & 0xf;
5353 /* One register and immediate. */
5354 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5355 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5356 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5357 * We choose to not special-case this and will behave as if a
5358 * valid constant encoding of 0 had been given.
5359 */
9ee6e8bb
PB
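 /* Expand the 8-bit immediate to its 32-bit form according to the
  * cmode encoding held in 'op'; the inverted (VBIC/VMVN) variants
  * are handled via 'invert'. */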
5360 switch (op) {
5361 case 0: case 1:
5362 /* no-op */
5363 break;
5364 case 2: case 3:
5365 imm <<= 8;
5366 break;
5367 case 4: case 5:
5368 imm <<= 16;
5369 break;
5370 case 6: case 7:
5371 imm <<= 24;
5372 break;
5373 case 8: case 9:
5374 imm |= imm << 16;
5375 break;
5376 case 10: case 11:
5377 imm = (imm << 8) | (imm << 24);
5378 break;
5379 case 12:
8e31209e 5380 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5381 break;
5382 case 13:
5383 imm = (imm << 16) | 0xffff;
5384 break;
5385 case 14:
5386 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5387 if (invert)
5388 imm = ~imm;
5389 break;
5390 case 15:
7d80fee5
PM
5391 if (invert) {
5392 return 1;
5393 }
9ee6e8bb
PB
5394 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5395 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5396 break;
5397 }
5398 if (invert)
5399 imm = ~imm;
5400
9ee6e8bb
PB
5401 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5402 if (op & 1 && op < 12) {
ad69471c 5403 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5404 if (invert) {
5405 /* The immediate value has already been inverted, so
5406 BIC becomes AND. */
ad69471c 5407 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5408 } else {
ad69471c 5409 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5410 }
9ee6e8bb 5411 } else {
ad69471c 5412 /* VMOV, VMVN. */
7d1b0095 5413 tmp = tcg_temp_new_i32();
9ee6e8bb 5414 if (op == 14 && invert) {
a5a14945 5415 int n;
ad69471c
PB
5416 uint32_t val;
5417 val = 0;
9ee6e8bb
PB
5418 for (n = 0; n < 4; n++) {
5419 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5420 val |= 0xff << (n * 8);
9ee6e8bb 5421 }
ad69471c
PB
5422 tcg_gen_movi_i32(tmp, val);
5423 } else {
5424 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5425 }
9ee6e8bb 5426 }
ad69471c 5427 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5428 }
5429 }
e4b3861d 5430 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5431 if (size != 3) {
5432 op = (insn >> 8) & 0xf;
5433 if ((insn & (1 << 6)) == 0) {
5434 /* Three registers of different lengths. */
5435 int src1_wide;
5436 int src2_wide;
5437 int prewiden;
695272dc
PM
5438 /* undefreq: bit 0 : UNDEF if size != 0
5439 * bit 1 : UNDEF if size == 0
5440 * bit 2 : UNDEF if U == 1
5441 * Note that [1:0] set implies 'always UNDEF'
5442 */
5443 int undefreq;
5444 /* prewiden, src1_wide, src2_wide, undefreq */
5445 static const int neon_3reg_wide[16][4] = {
5446 {1, 0, 0, 0}, /* VADDL */
5447 {1, 1, 0, 0}, /* VADDW */
5448 {1, 0, 0, 0}, /* VSUBL */
5449 {1, 1, 0, 0}, /* VSUBW */
5450 {0, 1, 1, 0}, /* VADDHN */
5451 {0, 0, 0, 0}, /* VABAL */
5452 {0, 1, 1, 0}, /* VSUBHN */
5453 {0, 0, 0, 0}, /* VABDL */
5454 {0, 0, 0, 0}, /* VMLAL */
5455 {0, 0, 0, 6}, /* VQDMLAL */
5456 {0, 0, 0, 0}, /* VMLSL */
5457 {0, 0, 0, 6}, /* VQDMLSL */
5458 {0, 0, 0, 0}, /* Integer VMULL */
5459 {0, 0, 0, 2}, /* VQDMULL */
5460 {0, 0, 0, 5}, /* Polynomial VMULL */
5461 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5462 };
5463
5464 prewiden = neon_3reg_wide[op][0];
5465 src1_wide = neon_3reg_wide[op][1];
5466 src2_wide = neon_3reg_wide[op][2];
695272dc 5467 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5468
695272dc
PM
5469 if (((undefreq & 1) && (size != 0)) ||
5470 ((undefreq & 2) && (size == 0)) ||
5471 ((undefreq & 4) && u)) {
5472 return 1;
5473 }
5474 if ((src1_wide && (rn & 1)) ||
5475 (src2_wide && (rm & 1)) ||
5476 (!src2_wide && (rd & 1))) {
ad69471c 5477 return 1;
695272dc 5478 }
ad69471c 5479
9ee6e8bb
PB
5480 /* Avoid overlapping operands. Wide source operands are
5481 always aligned so will never overlap with wide
5482 destinations in problematic ways. */
8f8e3aa4 5483 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5484 tmp = neon_load_reg(rm, 1);
5485 neon_store_scratch(2, tmp);
8f8e3aa4 5486 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5487 tmp = neon_load_reg(rn, 1);
5488 neon_store_scratch(2, tmp);
9ee6e8bb 5489 }
39d5492a 5490 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5491 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5492 if (src1_wide) {
5493 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5494 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5495 } else {
ad69471c 5496 if (pass == 1 && rd == rn) {
dd8fbd78 5497 tmp = neon_load_scratch(2);
9ee6e8bb 5498 } else {
ad69471c
PB
5499 tmp = neon_load_reg(rn, pass);
5500 }
5501 if (prewiden) {
5502 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5503 }
5504 }
ad69471c
PB
5505 if (src2_wide) {
5506 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5507 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5508 } else {
ad69471c 5509 if (pass == 1 && rd == rm) {
dd8fbd78 5510 tmp2 = neon_load_scratch(2);
9ee6e8bb 5511 } else {
ad69471c
PB
5512 tmp2 = neon_load_reg(rm, pass);
5513 }
5514 if (prewiden) {
5515 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5516 }
9ee6e8bb
PB
5517 }
5518 switch (op) {
5519 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5520 gen_neon_addl(size);
9ee6e8bb 5521 break;
79b0e534 5522 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5523 gen_neon_subl(size);
9ee6e8bb
PB
5524 break;
5525 case 5: case 7: /* VABAL, VABDL */
5526 switch ((size << 1) | u) {
ad69471c
PB
5527 case 0:
5528 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5529 break;
5530 case 1:
5531 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5532 break;
5533 case 2:
5534 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5535 break;
5536 case 3:
5537 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5538 break;
5539 case 4:
5540 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5541 break;
5542 case 5:
5543 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5544 break;
9ee6e8bb
PB
5545 default: abort();
5546 }
7d1b0095
PM
5547 tcg_temp_free_i32(tmp2);
5548 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5549 break;
5550 case 8: case 9: case 10: case 11: case 12: case 13:
5551 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5552 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5553 break;
5554 case 14: /* Polynomial VMULL */
e5ca24cb 5555 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5556 tcg_temp_free_i32(tmp2);
5557 tcg_temp_free_i32(tmp);
e5ca24cb 5558 break;
695272dc
PM
5559 default: /* 15 is RESERVED: caught earlier */
5560 abort();
9ee6e8bb 5561 }
ebcd88ce
PM
5562 if (op == 13) {
5563 /* VQDMULL */
5564 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5565 neon_store_reg64(cpu_V0, rd + pass);
5566 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5567 /* Accumulate. */
ebcd88ce 5568 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5569 switch (op) {
4dc064e6
PM
5570 case 10: /* VMLSL */
5571 gen_neon_negl(cpu_V0, size);
5572 /* Fall through */
5573 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5574 gen_neon_addl(size);
9ee6e8bb
PB
5575 break;
5576 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5577 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5578 if (op == 11) {
5579 gen_neon_negl(cpu_V0, size);
5580 }
ad69471c
PB
5581 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5582 break;
9ee6e8bb
PB
5583 default:
5584 abort();
5585 }
ad69471c 5586 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5587 } else if (op == 4 || op == 6) {
5588 /* Narrowing operation. */
7d1b0095 5589 tmp = tcg_temp_new_i32();
79b0e534 5590 if (!u) {
9ee6e8bb 5591 switch (size) {
ad69471c
PB
5592 case 0:
5593 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5594 break;
5595 case 1:
5596 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5597 break;
5598 case 2:
5599 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5600 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5601 break;
9ee6e8bb
PB
5602 default: abort();
5603 }
5604 } else {
5605 switch (size) {
ad69471c
PB
5606 case 0:
5607 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5608 break;
5609 case 1:
5610 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5611 break;
5612 case 2:
5613 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5614 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5615 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5616 break;
9ee6e8bb
PB
5617 default: abort();
5618 }
5619 }
ad69471c
PB
5620 if (pass == 0) {
5621 tmp3 = tmp;
5622 } else {
5623 neon_store_reg(rd, 0, tmp3);
5624 neon_store_reg(rd, 1, tmp);
5625 }
9ee6e8bb
PB
5626 } else {
5627 /* Write back the result. */
ad69471c 5628 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5629 }
5630 }
5631 } else {
3e3326df
PM
5632 /* Two registers and a scalar. NB that for ops of this form
5633 * the ARM ARM labels bit 24 as Q, but it is in our variable
5634 * 'u', not 'q'.
5635 */
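            /* Editorial note: as a result, the UNDEF checks and the pass counts
             * in this block test 'u' where a quad (Q) operation is meant, e.g.
             * the per-element loops below run (u ? 4 : 2) passes. */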
5636 if (size == 0) {
5637 return 1;
5638 }
9ee6e8bb 5639 switch (op) {
9ee6e8bb 5640 case 1: /* Floating point VMLA scalar */
9ee6e8bb 5641 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5642 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5643 if (size == 1) {
5644 return 1;
5645 }
5646 /* fall through */
5647 case 0: /* Integer VMLA scalar */
5648 case 4: /* Integer VMLS scalar */
5649 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5650 case 12: /* VQDMULH scalar */
5651 case 13: /* VQRDMULH scalar */
3e3326df
PM
5652 if (u && ((rd | rn) & 1)) {
5653 return 1;
5654 }
dd8fbd78
FN
5655 tmp = neon_get_scalar(size, rm);
5656 neon_store_scratch(0, tmp);
9ee6e8bb 5657 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5658 tmp = neon_load_scratch(0);
5659 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5660 if (op == 12) {
5661 if (size == 1) {
02da0b2d 5662 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5663 } else {
02da0b2d 5664 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5665 }
5666 } else if (op == 13) {
5667 if (size == 1) {
02da0b2d 5668 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5669 } else {
02da0b2d 5670 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5671 }
5672 } else if (op & 1) {
aa47cfdd
PM
5673 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5674 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5675 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5676 } else {
5677 switch (size) {
dd8fbd78
FN
5678 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5679 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5680 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5681 default: abort();
9ee6e8bb
PB
5682 }
5683 }
7d1b0095 5684 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5685 if (op < 8) {
5686 /* Accumulate. */
dd8fbd78 5687 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5688 switch (op) {
5689 case 0:
dd8fbd78 5690 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5691 break;
5692 case 1:
aa47cfdd
PM
5693 {
5694 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5695 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5696 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5697 break;
aa47cfdd 5698 }
9ee6e8bb 5699 case 4:
dd8fbd78 5700 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5701 break;
5702 case 5:
aa47cfdd
PM
5703 {
5704 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5705 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5706 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5707 break;
aa47cfdd 5708 }
9ee6e8bb
PB
5709 default:
5710 abort();
5711 }
7d1b0095 5712 tcg_temp_free_i32(tmp2);
9ee6e8bb 5713 }
dd8fbd78 5714 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5715 }
5716 break;
9ee6e8bb 5717 case 3: /* VQDMLAL scalar */
9ee6e8bb 5718 case 7: /* VQDMLSL scalar */
9ee6e8bb 5719 case 11: /* VQDMULL scalar */
3e3326df 5720 if (u == 1) {
ad69471c 5721 return 1;
3e3326df
PM
5722 }
5723 /* fall through */
5724 case 2: /* VMLAL scalar */
5725 case 6: /* VMLSL scalar */
5726 case 10: /* VMULL scalar */
5727 if (rd & 1) {
5728 return 1;
5729 }
dd8fbd78 5730 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5731 /* We need a copy of tmp2 because gen_neon_mull
5732 * deletes it during pass 0. */
7d1b0095 5733 tmp4 = tcg_temp_new_i32();
c6067f04 5734 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5735 tmp3 = neon_load_reg(rn, 1);
ad69471c 5736
9ee6e8bb 5737 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5738 if (pass == 0) {
5739 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5740 } else {
dd8fbd78 5741 tmp = tmp3;
c6067f04 5742 tmp2 = tmp4;
9ee6e8bb 5743 }
ad69471c 5744 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5745 if (op != 11) {
5746 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5747 }
9ee6e8bb 5748 switch (op) {
4dc064e6
PM
5749 case 6:
5750 gen_neon_negl(cpu_V0, size);
5751 /* Fall through */
5752 case 2:
ad69471c 5753 gen_neon_addl(size);
9ee6e8bb
PB
5754 break;
5755 case 3: case 7:
ad69471c 5756 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5757 if (op == 7) {
5758 gen_neon_negl(cpu_V0, size);
5759 }
ad69471c 5760 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5761 break;
5762 case 10:
5763 /* no-op */
5764 break;
5765 case 11:
ad69471c 5766 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5767 break;
5768 default:
5769 abort();
5770 }
ad69471c 5771 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5772 }
dd8fbd78 5773
dd8fbd78 5774
9ee6e8bb
PB
5775 break;
5776 default: /* 14 and 15 are RESERVED */
5777 return 1;
5778 }
5779 }
5780 } else { /* size == 3 */
5781 if (!u) {
5782 /* Extract. */
9ee6e8bb 5783 imm = (insn >> 8) & 0xf;
ad69471c
PB
5784
5785 if (imm > 7 && !q)
5786 return 1;
5787
52579ea1
PM
5788 if (q && ((rd | rn | rm) & 1)) {
5789 return 1;
5790 }
5791
ad69471c
PB
5792 if (imm == 0) {
5793 neon_load_reg64(cpu_V0, rn);
5794 if (q) {
5795 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5796 }
ad69471c
PB
5797 } else if (imm == 8) {
5798 neon_load_reg64(cpu_V0, rn + 1);
5799 if (q) {
5800 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5801 }
ad69471c 5802 } else if (q) {
a7812ae4 5803 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5804 if (imm < 8) {
5805 neon_load_reg64(cpu_V0, rn);
a7812ae4 5806 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5807 } else {
5808 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5809 neon_load_reg64(tmp64, rm);
ad69471c
PB
5810 }
5811 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5812 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5813 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5814 if (imm < 8) {
5815 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5816 } else {
ad69471c
PB
5817 neon_load_reg64(cpu_V1, rm + 1);
5818 imm -= 8;
9ee6e8bb 5819 }
ad69471c 5820 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5821 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5822 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5823 tcg_temp_free_i64(tmp64);
ad69471c 5824 } else {
a7812ae4 5825 /* BUGFIX */
ad69471c 5826 neon_load_reg64(cpu_V0, rn);
a7812ae4 5827 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5828 neon_load_reg64(cpu_V1, rm);
a7812ae4 5829 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5830 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5831 }
5832 neon_store_reg64(cpu_V0, rd);
5833 if (q) {
5834 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5835 }
5836 } else if ((insn & (1 << 11)) == 0) {
5837 /* Two register misc. */
5838 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5839 size = (insn >> 18) & 3;
600b828c
PM
5840 /* UNDEF for unknown op values and bad op-size combinations */
5841 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5842 return 1;
5843 }
fc2a9b37
PM
5844 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5845 q && ((rm | rd) & 1)) {
5846 return 1;
5847 }
9ee6e8bb 5848 switch (op) {
600b828c 5849 case NEON_2RM_VREV64:
9ee6e8bb 5850 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5851 tmp = neon_load_reg(rm, pass * 2);
5852 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5853 switch (size) {
dd8fbd78
FN
5854 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5855 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5856 case 2: /* no-op */ break;
5857 default: abort();
5858 }
dd8fbd78 5859 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5860 if (size == 2) {
dd8fbd78 5861 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5862 } else {
9ee6e8bb 5863 switch (size) {
dd8fbd78
FN
5864 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5865 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5866 default: abort();
5867 }
dd8fbd78 5868 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5869 }
5870 }
5871 break;
600b828c
PM
5872 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5873 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5874 for (pass = 0; pass < q + 1; pass++) {
5875 tmp = neon_load_reg(rm, pass * 2);
5876 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5877 tmp = neon_load_reg(rm, pass * 2 + 1);
5878 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5879 switch (size) {
5880 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5881 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5882 case 2: tcg_gen_add_i64(CPU_V001); break;
5883 default: abort();
5884 }
600b828c 5885 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5886 /* Accumulate. */
ad69471c
PB
5887 neon_load_reg64(cpu_V1, rd + pass);
5888 gen_neon_addl(size);
9ee6e8bb 5889 }
ad69471c 5890 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5891 }
5892 break;
600b828c 5893 case NEON_2RM_VTRN:
9ee6e8bb 5894 if (size == 2) {
a5a14945 5895 int n;
9ee6e8bb 5896 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5897 tmp = neon_load_reg(rm, n);
5898 tmp2 = neon_load_reg(rd, n + 1);
5899 neon_store_reg(rm, n, tmp2);
5900 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5901 }
5902 } else {
5903 goto elementwise;
5904 }
5905 break;
600b828c 5906 case NEON_2RM_VUZP:
02acedf9 5907 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5908 return 1;
9ee6e8bb
PB
5909 }
5910 break;
600b828c 5911 case NEON_2RM_VZIP:
d68a6f3a 5912 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5913 return 1;
9ee6e8bb
PB
5914 }
5915 break;
600b828c
PM
5916 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5917 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5918 if (rm & 1) {
5919 return 1;
5920 }
39d5492a 5921 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5922 for (pass = 0; pass < 2; pass++) {
ad69471c 5923 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5924 tmp = tcg_temp_new_i32();
600b828c
PM
5925 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5926 tmp, cpu_V0);
ad69471c
PB
5927 if (pass == 0) {
5928 tmp2 = tmp;
5929 } else {
5930 neon_store_reg(rd, 0, tmp2);
5931 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5932 }
9ee6e8bb
PB
5933 }
5934 break;
600b828c 5935 case NEON_2RM_VSHLL:
fc2a9b37 5936 if (q || (rd & 1)) {
9ee6e8bb 5937 return 1;
600b828c 5938 }
ad69471c
PB
5939 tmp = neon_load_reg(rm, 0);
5940 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5941 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5942 if (pass == 1)
5943 tmp = tmp2;
5944 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5945 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5946 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5947 }
5948 break;
600b828c 5949 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5950 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5951 q || (rm & 1)) {
5952 return 1;
5953 }
7d1b0095
PM
5954 tmp = tcg_temp_new_i32();
5955 tmp2 = tcg_temp_new_i32();
60011498 5956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5957 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5958 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5960 tcg_gen_shli_i32(tmp2, tmp2, 16);
5961 tcg_gen_or_i32(tmp2, tmp2, tmp);
5962 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5963 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5964 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5965 neon_store_reg(rd, 0, tmp2);
7d1b0095 5966 tmp2 = tcg_temp_new_i32();
2d981da7 5967 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5968 tcg_gen_shli_i32(tmp2, tmp2, 16);
5969 tcg_gen_or_i32(tmp2, tmp2, tmp);
5970 neon_store_reg(rd, 1, tmp2);
7d1b0095 5971 tcg_temp_free_i32(tmp);
60011498 5972 break;
600b828c 5973 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5974 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5975 q || (rd & 1)) {
5976 return 1;
5977 }
7d1b0095 5978 tmp3 = tcg_temp_new_i32();
60011498
PB
5979 tmp = neon_load_reg(rm, 0);
5980 tmp2 = neon_load_reg(rm, 1);
5981 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5982 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5983 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5984 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5985 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5986 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5987 tcg_temp_free_i32(tmp);
60011498 5988 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5989 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5990 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5991 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5992 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5993 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5994 tcg_temp_free_i32(tmp2);
5995 tcg_temp_free_i32(tmp3);
60011498 5996 break;
9ee6e8bb
PB
5997 default:
5998 elementwise:
5999 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6000 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6001 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6002 neon_reg_offset(rm, pass));
39d5492a 6003 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6004 } else {
dd8fbd78 6005 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6006 }
6007 switch (op) {
600b828c 6008 case NEON_2RM_VREV32:
9ee6e8bb 6009 switch (size) {
dd8fbd78
FN
6010 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6011 case 1: gen_swap_half(tmp); break;
600b828c 6012 default: abort();
9ee6e8bb
PB
6013 }
6014 break;
600b828c 6015 case NEON_2RM_VREV16:
dd8fbd78 6016 gen_rev16(tmp);
9ee6e8bb 6017 break;
600b828c 6018 case NEON_2RM_VCLS:
9ee6e8bb 6019 switch (size) {
dd8fbd78
FN
6020 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6021 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6022 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6023 default: abort();
9ee6e8bb
PB
6024 }
6025 break;
600b828c 6026 case NEON_2RM_VCLZ:
9ee6e8bb 6027 switch (size) {
dd8fbd78
FN
6028 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6029 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6030 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6031 default: abort();
9ee6e8bb
PB
6032 }
6033 break;
600b828c 6034 case NEON_2RM_VCNT:
dd8fbd78 6035 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6036 break;
600b828c 6037 case NEON_2RM_VMVN:
dd8fbd78 6038 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6039 break;
600b828c 6040 case NEON_2RM_VQABS:
9ee6e8bb 6041 switch (size) {
02da0b2d
PM
6042 case 0:
6043 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6044 break;
6045 case 1:
6046 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6047 break;
6048 case 2:
6049 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6050 break;
600b828c 6051 default: abort();
9ee6e8bb
PB
6052 }
6053 break;
600b828c 6054 case NEON_2RM_VQNEG:
9ee6e8bb 6055 switch (size) {
02da0b2d
PM
6056 case 0:
6057 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6058 break;
6059 case 1:
6060 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6061 break;
6062 case 2:
6063 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6064 break;
600b828c 6065 default: abort();
9ee6e8bb
PB
6066 }
6067 break;
600b828c 6068 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6069 tmp2 = tcg_const_i32(0);
9ee6e8bb 6070 switch(size) {
dd8fbd78
FN
6071 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6072 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6073 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6074 default: abort();
9ee6e8bb 6075 }
39d5492a 6076 tcg_temp_free_i32(tmp2);
600b828c 6077 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6078 tcg_gen_not_i32(tmp, tmp);
600b828c 6079 }
9ee6e8bb 6080 break;
600b828c 6081 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6082 tmp2 = tcg_const_i32(0);
9ee6e8bb 6083 switch(size) {
dd8fbd78
FN
6084 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6085 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6086 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6087 default: abort();
9ee6e8bb 6088 }
39d5492a 6089 tcg_temp_free_i32(tmp2);
600b828c 6090 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6091 tcg_gen_not_i32(tmp, tmp);
600b828c 6092 }
9ee6e8bb 6093 break;
600b828c 6094 case NEON_2RM_VCEQ0:
dd8fbd78 6095 tmp2 = tcg_const_i32(0);
9ee6e8bb 6096 switch(size) {
dd8fbd78
FN
6097 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6098 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6099 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6100 default: abort();
9ee6e8bb 6101 }
39d5492a 6102 tcg_temp_free_i32(tmp2);
9ee6e8bb 6103 break;
600b828c 6104 case NEON_2RM_VABS:
9ee6e8bb 6105 switch(size) {
dd8fbd78
FN
6106 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6107 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6108 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6109 default: abort();
9ee6e8bb
PB
6110 }
6111 break;
600b828c 6112 case NEON_2RM_VNEG:
dd8fbd78
FN
6113 tmp2 = tcg_const_i32(0);
6114 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6115 tcg_temp_free_i32(tmp2);
9ee6e8bb 6116 break;
600b828c 6117 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6118 {
6119 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6120 tmp2 = tcg_const_i32(0);
aa47cfdd 6121 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6122 tcg_temp_free_i32(tmp2);
aa47cfdd 6123 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6124 break;
aa47cfdd 6125 }
600b828c 6126 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6127 {
6128 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6129 tmp2 = tcg_const_i32(0);
aa47cfdd 6130 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6131 tcg_temp_free_i32(tmp2);
aa47cfdd 6132 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6133 break;
aa47cfdd 6134 }
600b828c 6135 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6136 {
6137 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6138 tmp2 = tcg_const_i32(0);
aa47cfdd 6139 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6140 tcg_temp_free_i32(tmp2);
aa47cfdd 6141 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6142 break;
aa47cfdd 6143 }
600b828c 6144 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6145 {
6146 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6147 tmp2 = tcg_const_i32(0);
aa47cfdd 6148 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6149 tcg_temp_free_i32(tmp2);
aa47cfdd 6150 tcg_temp_free_ptr(fpstatus);
0e326109 6151 break;
aa47cfdd 6152 }
600b828c 6153 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6154 {
6155 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6156 tmp2 = tcg_const_i32(0);
aa47cfdd 6157 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6158 tcg_temp_free_i32(tmp2);
aa47cfdd 6159 tcg_temp_free_ptr(fpstatus);
0e326109 6160 break;
aa47cfdd 6161 }
600b828c 6162 case NEON_2RM_VABS_F:
4373f3ce 6163 gen_vfp_abs(0);
9ee6e8bb 6164 break;
600b828c 6165 case NEON_2RM_VNEG_F:
4373f3ce 6166 gen_vfp_neg(0);
9ee6e8bb 6167 break;
600b828c 6168 case NEON_2RM_VSWP:
dd8fbd78
FN
6169 tmp2 = neon_load_reg(rd, pass);
6170 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6171 break;
600b828c 6172 case NEON_2RM_VTRN:
dd8fbd78 6173 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6174 switch (size) {
dd8fbd78
FN
6175 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6176 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6177 default: abort();
9ee6e8bb 6178 }
dd8fbd78 6179 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6180 break;
600b828c 6181 case NEON_2RM_VRECPE:
dd8fbd78 6182 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6183 break;
600b828c 6184 case NEON_2RM_VRSQRTE:
dd8fbd78 6185 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6186 break;
600b828c 6187 case NEON_2RM_VRECPE_F:
4373f3ce 6188 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6189 break;
600b828c 6190 case NEON_2RM_VRSQRTE_F:
4373f3ce 6191 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6192 break;
600b828c 6193 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6194 gen_vfp_sito(0, 1);
9ee6e8bb 6195 break;
600b828c 6196 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6197 gen_vfp_uito(0, 1);
9ee6e8bb 6198 break;
600b828c 6199 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6200 gen_vfp_tosiz(0, 1);
9ee6e8bb 6201 break;
600b828c 6202 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6203 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6204 break;
6205 default:
600b828c
PM
6206 /* Reserved op values were caught by the
6207 * neon_2rm_sizes[] check earlier.
6208 */
6209 abort();
9ee6e8bb 6210 }
600b828c 6211 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6212 tcg_gen_st_f32(cpu_F0s, cpu_env,
6213 neon_reg_offset(rd, pass));
9ee6e8bb 6214 } else {
dd8fbd78 6215 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6216 }
6217 }
6218 break;
6219 }
6220 } else if ((insn & (1 << 10)) == 0) {
6221 /* VTBL, VTBX. */
56907d77
PM
6222 int n = ((insn >> 8) & 3) + 1;
6223 if ((rn + n) > 32) {
6224 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6225 * helper function running off the end of the register file.
6226 */
6227 return 1;
6228 }
6229 n <<= 3;
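            /* Editorial note: n starts as the number of list registers (1-4) from
             * bits [9:8]; the shift by 3 converts it to the table length in bytes
             * (each D register holds 8 bytes), which is the value passed to the
             * neon_tbl helper below. */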
9ee6e8bb 6230 if (insn & (1 << 6)) {
8f8e3aa4 6231 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6232 } else {
7d1b0095 6233 tmp = tcg_temp_new_i32();
8f8e3aa4 6234 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6235 }
8f8e3aa4 6236 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6237 tmp4 = tcg_const_i32(rn);
6238 tmp5 = tcg_const_i32(n);
9ef39277 6239 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6240 tcg_temp_free_i32(tmp);
9ee6e8bb 6241 if (insn & (1 << 6)) {
8f8e3aa4 6242 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6243 } else {
7d1b0095 6244 tmp = tcg_temp_new_i32();
8f8e3aa4 6245 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6246 }
8f8e3aa4 6247 tmp3 = neon_load_reg(rm, 1);
9ef39277 6248 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6249 tcg_temp_free_i32(tmp5);
6250 tcg_temp_free_i32(tmp4);
8f8e3aa4 6251 neon_store_reg(rd, 0, tmp2);
3018f259 6252 neon_store_reg(rd, 1, tmp3);
7d1b0095 6253 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6254 } else if ((insn & 0x380) == 0) {
6255 /* VDUP */
133da6aa
JR
6256 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6257 return 1;
6258 }
9ee6e8bb 6259 if (insn & (1 << 19)) {
dd8fbd78 6260 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6261 } else {
dd8fbd78 6262 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6263 }
6264 if (insn & (1 << 16)) {
dd8fbd78 6265 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6266 } else if (insn & (1 << 17)) {
6267 if ((insn >> 18) & 1)
dd8fbd78 6268 gen_neon_dup_high16(tmp);
9ee6e8bb 6269 else
dd8fbd78 6270 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6271 }
6272 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6273 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6274 tcg_gen_mov_i32(tmp2, tmp);
6275 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6276 }
7d1b0095 6277 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6278 } else {
6279 return 1;
6280 }
6281 }
6282 }
6283 return 0;
6284}
6285
0ecb72a5 6286static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6287{
4b6a83fb
PM
6288 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6289 const ARMCPRegInfo *ri;
6290 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6291
6292 cpnum = (insn >> 8) & 0xf;
6293 if (arm_feature(env, ARM_FEATURE_XSCALE)
6294 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6295 return 1;
6296
4b6a83fb 6297 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6298 switch (cpnum) {
6299 case 0:
6300 case 1:
6301 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6302 return disas_iwmmxt_insn(env, s, insn);
6303 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6304 return disas_dsp_insn(env, s, insn);
6305 }
6306 return 1;
4b6a83fb
PM
6307 default:
6308 break;
6309 }
6310
6311 /* Otherwise treat as a generic register access */
6312 is64 = (insn & (1 << 25)) == 0;
6313 if (!is64 && ((insn & (1 << 4)) == 0)) {
6314 /* cdp */
6315 return 1;
6316 }
6317
6318 crm = insn & 0xf;
6319 if (is64) {
6320 crn = 0;
6321 opc1 = (insn >> 4) & 0xf;
6322 opc2 = 0;
6323 rt2 = (insn >> 16) & 0xf;
6324 } else {
6325 crn = (insn >> 16) & 0xf;
6326 opc1 = (insn >> 21) & 7;
6327 opc2 = (insn >> 5) & 7;
6328 rt2 = 0;
6329 }
6330 isread = (insn >> 20) & 1;
6331 rt = (insn >> 12) & 0xf;
6332
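    /* Editorial example (not in the original source): for a 32-bit access such as
     *   MRC p15, 0, r0, c1, c0, 0
     * the fields extracted above are cpnum = 15, opc1 = 0, crn = 1, crm = 0,
     * opc2 = 0, rt = 0 and isread = 1, which ENCODE_CP_REG() below packs into
     * the key used to look up the register description. */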
6333 ri = get_arm_cp_reginfo(cpu,
6334 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6335 if (ri) {
6336 /* Check access permissions */
6337 if (!cp_access_ok(env, ri, isread)) {
6338 return 1;
6339 }
6340
6341 /* Handle special cases first */
6342 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6343 case ARM_CP_NOP:
6344 return 0;
6345 case ARM_CP_WFI:
6346 if (isread) {
6347 return 1;
6348 }
eaed129d 6349 gen_set_pc_im(s, s->pc);
4b6a83fb 6350 s->is_jmp = DISAS_WFI;
2bee5105 6351 return 0;
4b6a83fb
PM
6352 default:
6353 break;
6354 }
6355
2452731c
PM
6356 if (use_icount && (ri->type & ARM_CP_IO)) {
6357 gen_io_start();
6358 }
6359
4b6a83fb
PM
6360 if (isread) {
6361 /* Read */
6362 if (is64) {
6363 TCGv_i64 tmp64;
6364 TCGv_i32 tmp;
6365 if (ri->type & ARM_CP_CONST) {
6366 tmp64 = tcg_const_i64(ri->resetvalue);
6367 } else if (ri->readfn) {
6368 TCGv_ptr tmpptr;
eaed129d 6369 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6370 tmp64 = tcg_temp_new_i64();
6371 tmpptr = tcg_const_ptr(ri);
6372 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6373 tcg_temp_free_ptr(tmpptr);
6374 } else {
6375 tmp64 = tcg_temp_new_i64();
6376 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6377 }
6378 tmp = tcg_temp_new_i32();
6379 tcg_gen_trunc_i64_i32(tmp, tmp64);
6380 store_reg(s, rt, tmp);
6381 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6382 tmp = tcg_temp_new_i32();
4b6a83fb 6383 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6384 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6385 store_reg(s, rt2, tmp);
6386 } else {
39d5492a 6387 TCGv_i32 tmp;
4b6a83fb
PM
6388 if (ri->type & ARM_CP_CONST) {
6389 tmp = tcg_const_i32(ri->resetvalue);
6390 } else if (ri->readfn) {
6391 TCGv_ptr tmpptr;
eaed129d 6392 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6393 tmp = tcg_temp_new_i32();
6394 tmpptr = tcg_const_ptr(ri);
6395 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6396 tcg_temp_free_ptr(tmpptr);
6397 } else {
6398 tmp = load_cpu_offset(ri->fieldoffset);
6399 }
6400 if (rt == 15) {
6401 /* Destination register of r15 for 32 bit loads sets
6402 * the condition codes from the high 4 bits of the value
6403 */
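                /* Editorial note: this is the MRC ..., APSR_nzcv form: with
                 * Rt == 15 the top four bits of the result update the N, Z, C
                 * and V flags rather than being written to a core register. */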
6404 gen_set_nzcv(tmp);
6405 tcg_temp_free_i32(tmp);
6406 } else {
6407 store_reg(s, rt, tmp);
6408 }
6409 }
6410 } else {
6411 /* Write */
6412 if (ri->type & ARM_CP_CONST) {
6413 /* If not forbidden by access permissions, treat as WI */
6414 return 0;
6415 }
6416
6417 if (is64) {
39d5492a 6418 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6419 TCGv_i64 tmp64 = tcg_temp_new_i64();
6420 tmplo = load_reg(s, rt);
6421 tmphi = load_reg(s, rt2);
6422 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6423 tcg_temp_free_i32(tmplo);
6424 tcg_temp_free_i32(tmphi);
6425 if (ri->writefn) {
6426 TCGv_ptr tmpptr = tcg_const_ptr(ri);
eaed129d 6427 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6428 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6429 tcg_temp_free_ptr(tmpptr);
6430 } else {
6431 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6432 }
6433 tcg_temp_free_i64(tmp64);
6434 } else {
6435 if (ri->writefn) {
39d5492a 6436 TCGv_i32 tmp;
4b6a83fb 6437 TCGv_ptr tmpptr;
eaed129d 6438 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6439 tmp = load_reg(s, rt);
6440 tmpptr = tcg_const_ptr(ri);
6441 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6442 tcg_temp_free_ptr(tmpptr);
6443 tcg_temp_free_i32(tmp);
6444 } else {
39d5492a 6445 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6446 store_cpu_offset(tmp, ri->fieldoffset);
6447 }
6448 }
2452731c
PM
6449 }
6450
6451 if (use_icount && (ri->type & ARM_CP_IO)) {
6452 /* I/O operations must end the TB here (whether read or write) */
6453 gen_io_end();
6454 gen_lookup_tb(s);
6455 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
6456 /* We default to ending the TB on a coprocessor register write,
6457 * but allow this to be suppressed by the register definition
6458 * (usually only necessary to work around guest bugs).
6459 */
2452731c 6460 gen_lookup_tb(s);
4b6a83fb 6461 }
2452731c 6462
4b6a83fb
PM
6463 return 0;
6464 }
6465
4a9a539f 6466 return 1;
9ee6e8bb
PB
6467}
6468
5e3f878a
PB
6469
6470/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6471static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6472{
39d5492a 6473 TCGv_i32 tmp;
7d1b0095 6474 tmp = tcg_temp_new_i32();
5e3f878a
PB
6475 tcg_gen_trunc_i64_i32(tmp, val);
6476 store_reg(s, rlow, tmp);
7d1b0095 6477 tmp = tcg_temp_new_i32();
5e3f878a
PB
6478 tcg_gen_shri_i64(val, val, 32);
6479 tcg_gen_trunc_i64_i32(tmp, val);
6480 store_reg(s, rhigh, tmp);
6481}
6482
6483/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6484static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6485{
a7812ae4 6486 TCGv_i64 tmp;
39d5492a 6487 TCGv_i32 tmp2;
5e3f878a 6488
36aa55dc 6489 /* Load value and extend to 64 bits. */
a7812ae4 6490 tmp = tcg_temp_new_i64();
5e3f878a
PB
6491 tmp2 = load_reg(s, rlow);
6492 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6493 tcg_temp_free_i32(tmp2);
5e3f878a 6494 tcg_gen_add_i64(val, val, tmp);
b75263d6 6495 tcg_temp_free_i64(tmp);
5e3f878a
PB
6496}
6497
6498/* load and add a 64-bit value from a register pair. */
a7812ae4 6499static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6500{
a7812ae4 6501 TCGv_i64 tmp;
39d5492a
PM
6502 TCGv_i32 tmpl;
6503 TCGv_i32 tmph;
5e3f878a
PB
6504
6505 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6506 tmpl = load_reg(s, rlow);
6507 tmph = load_reg(s, rhigh);
a7812ae4 6508 tmp = tcg_temp_new_i64();
36aa55dc 6509 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6510 tcg_temp_free_i32(tmpl);
6511 tcg_temp_free_i32(tmph);
5e3f878a 6512 tcg_gen_add_i64(val, val, tmp);
b75263d6 6513 tcg_temp_free_i64(tmp);
5e3f878a
PB
6514}
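
/* Editorial note: the three helpers above serve the long multiply forms later
 * in this file, e.g. UMAAL accumulates each half via gen_addq_lo() and the
 * halfword SMLAL<x><y> path accumulates with gen_addq() and writes the result
 * back with gen_storeq_reg(). */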
6515
c9f10124 6516/* Set N and Z flags from hi|lo. */
39d5492a 6517static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6518{
c9f10124
RH
6519 tcg_gen_mov_i32(cpu_NF, hi);
6520 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6521}
6522
426f5abc
PB
6523/* Load/Store exclusive instructions are implemented by remembering
6524 the value/address loaded, and seeing if these are the same
b90372ad 6525 when the store is performed. This should be sufficient to implement
426f5abc
PB
6526 the architecturally mandated semantics, and avoids having to monitor
6527 regular stores.
6528
6529 In system emulation mode only one CPU will be running at once, so
6530 this sequence is effectively atomic. In user emulation mode we
6531 throw an exception and handle the atomic operation elsewhere. */
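/* Editorial example (not part of the original source): the guest sequence this
   models is the usual retry loop, e.g.
     retry: ldrex r1, [r0]
            add   r1, r1, #1
            strex r2, r1, [r0]
            cmp   r2, #0
            bne   retry
   which only needs the store to succeed when the value re-read at the store
   still matches the one remembered by gen_load_exclusive(). */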
6532static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6533 TCGv_i32 addr, int size)
426f5abc 6534{
94ee24e7 6535 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6536
6537 switch (size) {
6538 case 0:
08307563 6539 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6540 break;
6541 case 1:
08307563 6542 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6543 break;
6544 case 2:
6545 case 3:
08307563 6546 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6547 break;
6548 default:
6549 abort();
6550 }
6551 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6552 store_reg(s, rt, tmp);
6553 if (size == 3) {
39d5492a 6554 TCGv_i32 tmp2 = tcg_temp_new_i32();
2c9adbda 6555 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6556 tmp = tcg_temp_new_i32();
08307563 6557 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6558 tcg_temp_free_i32(tmp2);
426f5abc
PB
6559 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6560 store_reg(s, rt2, tmp);
6561 }
6562 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6563}
6564
6565static void gen_clrex(DisasContext *s)
6566{
6567 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6568}
6569
6570#ifdef CONFIG_USER_ONLY
6571static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6572 TCGv_i32 addr, int size)
426f5abc
PB
6573{
6574 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6575 tcg_gen_movi_i32(cpu_exclusive_info,
6576 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6577 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6578}
6579#else
6580static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6581 TCGv_i32 addr, int size)
426f5abc 6582{
39d5492a 6583 TCGv_i32 tmp;
426f5abc
PB
6584 int done_label;
6585 int fail_label;
6586
6587 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6588 [addr] = {Rt};
6589 {Rd} = 0;
6590 } else {
6591 {Rd} = 1;
6592 } */
6593 fail_label = gen_new_label();
6594 done_label = gen_new_label();
6595 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
94ee24e7 6596 tmp = tcg_temp_new_i32();
426f5abc
PB
6597 switch (size) {
6598 case 0:
08307563 6599 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6600 break;
6601 case 1:
08307563 6602 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6603 break;
6604 case 2:
6605 case 3:
08307563 6606 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6607 break;
6608 default:
6609 abort();
6610 }
6611 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6612 tcg_temp_free_i32(tmp);
426f5abc 6613 if (size == 3) {
39d5492a 6614 TCGv_i32 tmp2 = tcg_temp_new_i32();
426f5abc 6615 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6616 tmp = tcg_temp_new_i32();
08307563 6617 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6618 tcg_temp_free_i32(tmp2);
426f5abc 6619 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6620 tcg_temp_free_i32(tmp);
426f5abc
PB
6621 }
6622 tmp = load_reg(s, rt);
6623 switch (size) {
6624 case 0:
08307563 6625 gen_aa32_st8(tmp, addr, IS_USER(s));
426f5abc
PB
6626 break;
6627 case 1:
08307563 6628 gen_aa32_st16(tmp, addr, IS_USER(s));
426f5abc
PB
6629 break;
6630 case 2:
6631 case 3:
08307563 6632 gen_aa32_st32(tmp, addr, IS_USER(s));
426f5abc
PB
6633 break;
6634 default:
6635 abort();
6636 }
94ee24e7 6637 tcg_temp_free_i32(tmp);
426f5abc
PB
6638 if (size == 3) {
6639 tcg_gen_addi_i32(addr, addr, 4);
6640 tmp = load_reg(s, rt2);
08307563 6641 gen_aa32_st32(tmp, addr, IS_USER(s));
94ee24e7 6642 tcg_temp_free_i32(tmp);
426f5abc
PB
6643 }
6644 tcg_gen_movi_i32(cpu_R[rd], 0);
6645 tcg_gen_br(done_label);
6646 gen_set_label(fail_label);
6647 tcg_gen_movi_i32(cpu_R[rd], 1);
6648 gen_set_label(done_label);
6649 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6650}
6651#endif
6652
81465888
PM
6653/* gen_srs:
6654 * @env: CPUARMState
6655 * @s: DisasContext
6656 * @mode: mode field from insn (which stack to store to)
6657 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6658 * @writeback: true if writeback bit set
6659 *
6660 * Generate code for the SRS (Store Return State) insn.
6661 */
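/* Editorial note: the first switch below picks the offset of the first word
 * stored (DA: -4, IA: 0, DB: -8, IB: +4); when writeback is set, the second
 * switch applies the final base adjustment (DA: -8, IA: +4, DB: -4, IB: 0).
 */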
6662static void gen_srs(DisasContext *s,
6663 uint32_t mode, uint32_t amode, bool writeback)
6664{
6665 int32_t offset;
6666 TCGv_i32 addr = tcg_temp_new_i32();
6667 TCGv_i32 tmp = tcg_const_i32(mode);
6668 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6669 tcg_temp_free_i32(tmp);
6670 switch (amode) {
6671 case 0: /* DA */
6672 offset = -4;
6673 break;
6674 case 1: /* IA */
6675 offset = 0;
6676 break;
6677 case 2: /* DB */
6678 offset = -8;
6679 break;
6680 case 3: /* IB */
6681 offset = 4;
6682 break;
6683 default:
6684 abort();
6685 }
6686 tcg_gen_addi_i32(addr, addr, offset);
6687 tmp = load_reg(s, 14);
08307563 6688 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6689 tcg_temp_free_i32(tmp);
81465888
PM
6690 tmp = load_cpu_field(spsr);
6691 tcg_gen_addi_i32(addr, addr, 4);
08307563 6692 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6693 tcg_temp_free_i32(tmp);
81465888
PM
6694 if (writeback) {
6695 switch (amode) {
6696 case 0:
6697 offset = -8;
6698 break;
6699 case 1:
6700 offset = 4;
6701 break;
6702 case 2:
6703 offset = -4;
6704 break;
6705 case 3:
6706 offset = 0;
6707 break;
6708 default:
6709 abort();
6710 }
6711 tcg_gen_addi_i32(addr, addr, offset);
6712 tmp = tcg_const_i32(mode);
6713 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6714 tcg_temp_free_i32(tmp);
6715 }
6716 tcg_temp_free_i32(addr);
6717}
6718
0ecb72a5 6719static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6720{
6721 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6722 TCGv_i32 tmp;
6723 TCGv_i32 tmp2;
6724 TCGv_i32 tmp3;
6725 TCGv_i32 addr;
a7812ae4 6726 TCGv_i64 tmp64;
9ee6e8bb 6727
d31dd73e 6728 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6729 s->pc += 4;
6730
6731 /* M variants do not implement ARM mode. */
6732 if (IS_M(env))
6733 goto illegal_op;
6734 cond = insn >> 28;
6735 if (cond == 0xf){
be5e7a76
DES
6736 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6737 * choose to UNDEF. In ARMv5 and above the space is used
6738 * for miscellaneous unconditional instructions.
6739 */
6740 ARCH(5);
6741
9ee6e8bb
PB
6742 /* Unconditional instructions. */
6743 if (((insn >> 25) & 7) == 1) {
6744 /* NEON Data processing. */
6745 if (!arm_feature(env, ARM_FEATURE_NEON))
6746 goto illegal_op;
6747
6748 if (disas_neon_data_insn(env, s, insn))
6749 goto illegal_op;
6750 return;
6751 }
6752 if ((insn & 0x0f100000) == 0x04000000) {
6753 /* NEON load/store. */
6754 if (!arm_feature(env, ARM_FEATURE_NEON))
6755 goto illegal_op;
6756
6757 if (disas_neon_ls_insn(env, s, insn))
6758 goto illegal_op;
6759 return;
6760 }
6a57f3eb
WN
6761 if ((insn & 0x0f000e10) == 0x0e000a00) {
6762 /* VFP. */
6763 if (disas_vfp_insn(env, s, insn)) {
6764 goto illegal_op;
6765 }
6766 return;
6767 }
3d185e5d
PM
6768 if (((insn & 0x0f30f000) == 0x0510f000) ||
6769 ((insn & 0x0f30f010) == 0x0710f000)) {
6770 if ((insn & (1 << 22)) == 0) {
6771 /* PLDW; v7MP */
6772 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6773 goto illegal_op;
6774 }
6775 }
6776 /* Otherwise PLD; v5TE+ */
be5e7a76 6777 ARCH(5TE);
3d185e5d
PM
6778 return;
6779 }
6780 if (((insn & 0x0f70f000) == 0x0450f000) ||
6781 ((insn & 0x0f70f010) == 0x0650f000)) {
6782 ARCH(7);
6783 return; /* PLI; V7 */
6784 }
6785 if (((insn & 0x0f700000) == 0x04100000) ||
6786 ((insn & 0x0f700010) == 0x06100000)) {
6787 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6788 goto illegal_op;
6789 }
6790 return; /* v7MP: Unallocated memory hint: must NOP */
6791 }
6792
6793 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6794 ARCH(6);
6795 /* setend */
10962fd5
PM
6796 if (((insn >> 9) & 1) != s->bswap_code) {
6797 /* Dynamic endianness switching not implemented. */
e0c270d9 6798 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
6799 goto illegal_op;
6800 }
6801 return;
6802 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6803 switch ((insn >> 4) & 0xf) {
6804 case 1: /* clrex */
6805 ARCH(6K);
426f5abc 6806 gen_clrex(s);
9ee6e8bb
PB
6807 return;
6808 case 4: /* dsb */
6809 case 5: /* dmb */
6810 case 6: /* isb */
6811 ARCH(7);
6812 /* We don't emulate caches, so these are no-ops. */
6813 return;
6814 default:
6815 goto illegal_op;
6816 }
6817 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6818 /* srs */
81465888 6819 if (IS_USER(s)) {
9ee6e8bb 6820 goto illegal_op;
9ee6e8bb 6821 }
81465888
PM
6822 ARCH(6);
6823 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6824 return;
ea825eee 6825 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6826 /* rfe */
c67b6b71 6827 int32_t offset;
9ee6e8bb
PB
6828 if (IS_USER(s))
6829 goto illegal_op;
6830 ARCH(6);
6831 rn = (insn >> 16) & 0xf;
b0109805 6832 addr = load_reg(s, rn);
9ee6e8bb
PB
6833 i = (insn >> 23) & 3;
6834 switch (i) {
b0109805 6835 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6836 case 1: offset = 0; break; /* IA */
6837 case 2: offset = -8; break; /* DB */
b0109805 6838 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6839 default: abort();
6840 }
6841 if (offset)
b0109805
PB
6842 tcg_gen_addi_i32(addr, addr, offset);
6843 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 6844 tmp = tcg_temp_new_i32();
08307563 6845 gen_aa32_ld32u(tmp, addr, 0);
b0109805 6846 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6847 tmp2 = tcg_temp_new_i32();
08307563 6848 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6849 if (insn & (1 << 21)) {
6850 /* Base writeback. */
6851 switch (i) {
b0109805 6852 case 0: offset = -8; break;
c67b6b71
FN
6853 case 1: offset = 4; break;
6854 case 2: offset = -4; break;
b0109805 6855 case 3: offset = 0; break;
9ee6e8bb
PB
6856 default: abort();
6857 }
6858 if (offset)
b0109805
PB
6859 tcg_gen_addi_i32(addr, addr, offset);
6860 store_reg(s, rn, addr);
6861 } else {
7d1b0095 6862 tcg_temp_free_i32(addr);
9ee6e8bb 6863 }
b0109805 6864 gen_rfe(s, tmp, tmp2);
c67b6b71 6865 return;
9ee6e8bb
PB
6866 } else if ((insn & 0x0e000000) == 0x0a000000) {
6867 /* branch link and change to thumb (blx <offset>) */
6868 int32_t offset;
6869
6870 val = (uint32_t)s->pc;
7d1b0095 6871 tmp = tcg_temp_new_i32();
d9ba4830
PB
6872 tcg_gen_movi_i32(tmp, val);
6873 store_reg(s, 14, tmp);
9ee6e8bb
PB
6874 /* Sign-extend the 24-bit offset */
6875 offset = (((int32_t)insn) << 8) >> 8;
6876 /* offset * 4 + bit24 * 2 + (thumb bit) */
6877 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6878 /* pipeline offset */
6879 val += 4;
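            /* Editorial note: the target works out to the address of this
             * insn + 8 + (imm24 * 4) + (H * 2), where H is bit 24; keeping
             * bit 0 set makes gen_bx_im() switch to Thumb state, as BLX
             * (immediate) always does. */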
be5e7a76 6880 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6881 gen_bx_im(s, val);
9ee6e8bb
PB
6882 return;
6883 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6884 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6885 /* iWMMXt register transfer. */
6886 if (env->cp15.c15_cpar & (1 << 1))
6887 if (!disas_iwmmxt_insn(env, s, insn))
6888 return;
6889 }
6890 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6891 /* Coprocessor double register transfer. */
be5e7a76 6892 ARCH(5TE);
9ee6e8bb
PB
6893 } else if ((insn & 0x0f000010) == 0x0e000010) {
6894 /* Additional coprocessor register transfer. */
7997d92f 6895 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6896 uint32_t mask;
6897 uint32_t val;
6898 /* cps (privileged) */
6899 if (IS_USER(s))
6900 return;
6901 mask = val = 0;
6902 if (insn & (1 << 19)) {
6903 if (insn & (1 << 8))
6904 mask |= CPSR_A;
6905 if (insn & (1 << 7))
6906 mask |= CPSR_I;
6907 if (insn & (1 << 6))
6908 mask |= CPSR_F;
6909 if (insn & (1 << 18))
6910 val |= mask;
6911 }
7997d92f 6912 if (insn & (1 << 17)) {
9ee6e8bb
PB
6913 mask |= CPSR_M;
6914 val |= (insn & 0x1f);
6915 }
6916 if (mask) {
2fbac54b 6917 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6918 }
6919 return;
6920 }
6921 goto illegal_op;
6922 }
6923 if (cond != 0xe) {
6924 /* if the condition is not "always", generate a conditional jump to
6925 the next instruction */
6926 s->condlabel = gen_new_label();
d9ba4830 6927 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6928 s->condjmp = 1;
6929 }
6930 if ((insn & 0x0f900000) == 0x03000000) {
6931 if ((insn & (1 << 21)) == 0) {
6932 ARCH(6T2);
6933 rd = (insn >> 12) & 0xf;
6934 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6935 if ((insn & (1 << 22)) == 0) {
6936 /* MOVW */
7d1b0095 6937 tmp = tcg_temp_new_i32();
5e3f878a 6938 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6939 } else {
6940 /* MOVT */
5e3f878a 6941 tmp = load_reg(s, rd);
86831435 6942 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6943 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6944 }
5e3f878a 6945 store_reg(s, rd, tmp);
9ee6e8bb
PB
6946 } else {
6947 if (((insn >> 12) & 0xf) != 0xf)
6948 goto illegal_op;
6949 if (((insn >> 16) & 0xf) == 0) {
6950 gen_nop_hint(s, insn & 0xff);
6951 } else {
6952 /* CPSR = immediate */
6953 val = insn & 0xff;
6954 shift = ((insn >> 8) & 0xf) * 2;
6955 if (shift)
6956 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6957 i = ((insn & (1 << 22)) != 0);
2fbac54b 6958 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6959 goto illegal_op;
6960 }
6961 }
6962 } else if ((insn & 0x0f900000) == 0x01000000
6963 && (insn & 0x00000090) != 0x00000090) {
6964 /* miscellaneous instructions */
6965 op1 = (insn >> 21) & 3;
6966 sh = (insn >> 4) & 0xf;
6967 rm = insn & 0xf;
6968 switch (sh) {
6969 case 0x0: /* move program status register */
6970 if (op1 & 1) {
6971 /* PSR = reg */
2fbac54b 6972 tmp = load_reg(s, rm);
9ee6e8bb 6973 i = ((op1 & 2) != 0);
2fbac54b 6974 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6975 goto illegal_op;
6976 } else {
6977 /* reg = PSR */
6978 rd = (insn >> 12) & 0xf;
6979 if (op1 & 2) {
6980 if (IS_USER(s))
6981 goto illegal_op;
d9ba4830 6982 tmp = load_cpu_field(spsr);
9ee6e8bb 6983 } else {
7d1b0095 6984 tmp = tcg_temp_new_i32();
9ef39277 6985 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6986 }
d9ba4830 6987 store_reg(s, rd, tmp);
9ee6e8bb
PB
6988 }
6989 break;
6990 case 0x1:
6991 if (op1 == 1) {
6992 /* branch/exchange thumb (bx). */
be5e7a76 6993 ARCH(4T);
d9ba4830
PB
6994 tmp = load_reg(s, rm);
6995 gen_bx(s, tmp);
9ee6e8bb
PB
6996 } else if (op1 == 3) {
6997 /* clz */
be5e7a76 6998 ARCH(5);
9ee6e8bb 6999 rd = (insn >> 12) & 0xf;
1497c961
PB
7000 tmp = load_reg(s, rm);
7001 gen_helper_clz(tmp, tmp);
7002 store_reg(s, rd, tmp);
9ee6e8bb
PB
7003 } else {
7004 goto illegal_op;
7005 }
7006 break;
7007 case 0x2:
7008 if (op1 == 1) {
7009 ARCH(5J); /* bxj */
7010 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7011 tmp = load_reg(s, rm);
7012 gen_bx(s, tmp);
9ee6e8bb
PB
7013 } else {
7014 goto illegal_op;
7015 }
7016 break;
7017 case 0x3:
7018 if (op1 != 1)
7019 goto illegal_op;
7020
be5e7a76 7021 ARCH(5);
9ee6e8bb 7022 /* branch link/exchange thumb (blx) */
d9ba4830 7023 tmp = load_reg(s, rm);
7d1b0095 7024 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7025 tcg_gen_movi_i32(tmp2, s->pc);
7026 store_reg(s, 14, tmp2);
7027 gen_bx(s, tmp);
9ee6e8bb
PB
7028 break;
7029 case 0x5: /* saturating add/subtract */
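            /* Editorial note: op1 selects QADD (00), QSUB (01), QDADD (10) or
             * QDSUB (11); bit 1 doubles (with saturation) the second operand
             * and bit 0 selects subtract rather than add. */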
be5e7a76 7030 ARCH(5TE);
9ee6e8bb
PB
7031 rd = (insn >> 12) & 0xf;
7032 rn = (insn >> 16) & 0xf;
b40d0353 7033 tmp = load_reg(s, rm);
5e3f878a 7034 tmp2 = load_reg(s, rn);
9ee6e8bb 7035 if (op1 & 2)
9ef39277 7036 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7037 if (op1 & 1)
9ef39277 7038 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7039 else
9ef39277 7040 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7041 tcg_temp_free_i32(tmp2);
5e3f878a 7042 store_reg(s, rd, tmp);
9ee6e8bb 7043 break;
49e14940
AL
7044 case 7:
7045 /* SMC instruction (op1 == 3)
7046 and undefined instructions (op1 == 0 || op1 == 2)
7047 will trap */
7048 if (op1 != 1) {
7049 goto illegal_op;
7050 }
7051 /* bkpt */
be5e7a76 7052 ARCH(5);
bc4a0de0 7053 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7054 break;
7055 case 0x8: /* signed multiply */
7056 case 0xa:
7057 case 0xc:
7058 case 0xe:
be5e7a76 7059 ARCH(5TE);
9ee6e8bb
PB
7060 rs = (insn >> 8) & 0xf;
7061 rn = (insn >> 12) & 0xf;
7062 rd = (insn >> 16) & 0xf;
7063 if (op1 == 1) {
7064 /* (32 * 16) >> 16 */
5e3f878a
PB
7065 tmp = load_reg(s, rm);
7066 tmp2 = load_reg(s, rs);
9ee6e8bb 7067 if (sh & 4)
5e3f878a 7068 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7069 else
5e3f878a 7070 gen_sxth(tmp2);
a7812ae4
PB
7071 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7072 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7073 tmp = tcg_temp_new_i32();
a7812ae4 7074 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7075 tcg_temp_free_i64(tmp64);
9ee6e8bb 7076 if ((sh & 2) == 0) {
5e3f878a 7077 tmp2 = load_reg(s, rn);
9ef39277 7078 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7079 tcg_temp_free_i32(tmp2);
9ee6e8bb 7080 }
5e3f878a 7081 store_reg(s, rd, tmp);
9ee6e8bb
PB
7082 } else {
7083 /* 16 * 16 */
5e3f878a
PB
7084 tmp = load_reg(s, rm);
7085 tmp2 = load_reg(s, rs);
7086 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7087 tcg_temp_free_i32(tmp2);
9ee6e8bb 7088 if (op1 == 2) {
a7812ae4
PB
7089 tmp64 = tcg_temp_new_i64();
7090 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7091 tcg_temp_free_i32(tmp);
a7812ae4
PB
7092 gen_addq(s, tmp64, rn, rd);
7093 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7094 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7095 } else {
7096 if (op1 == 0) {
5e3f878a 7097 tmp2 = load_reg(s, rn);
9ef39277 7098 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7099 tcg_temp_free_i32(tmp2);
9ee6e8bb 7100 }
5e3f878a 7101 store_reg(s, rd, tmp);
9ee6e8bb
PB
7102 }
7103 }
7104 break;
7105 default:
7106 goto illegal_op;
7107 }
7108 } else if (((insn & 0x0e000000) == 0 &&
7109 (insn & 0x00000090) != 0x90) ||
7110 ((insn & 0x0e000000) == (1 << 25))) {
7111 int set_cc, logic_cc, shiftop;
7112
7113 op1 = (insn >> 21) & 0xf;
7114 set_cc = (insn >> 20) & 1;
7115 logic_cc = table_logic_cc[op1] & set_cc;
7116
7117 /* data processing instruction */
7118 if (insn & (1 << 25)) {
7119 /* immediate operand */
7120 val = insn & 0xff;
7121 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7122 if (shift) {
9ee6e8bb 7123 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7124 }
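                /* Editorial example: the immediate is an 8-bit value rotated
                 * right by twice the 4-bit rotate field, so e.g. 0x4ff encodes
                 * 0xff ror 8 == 0xff000000. */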
7d1b0095 7125 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7126 tcg_gen_movi_i32(tmp2, val);
7127 if (logic_cc && shift) {
7128 gen_set_CF_bit31(tmp2);
7129 }
9ee6e8bb
PB
7130 } else {
7131 /* register */
7132 rm = (insn) & 0xf;
e9bb4aa9 7133 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7134 shiftop = (insn >> 5) & 3;
7135 if (!(insn & (1 << 4))) {
7136 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7137 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7138 } else {
7139 rs = (insn >> 8) & 0xf;
8984bd2e 7140 tmp = load_reg(s, rs);
e9bb4aa9 7141 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7142 }
7143 }
7144 if (op1 != 0x0f && op1 != 0x0d) {
7145 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7146 tmp = load_reg(s, rn);
7147 } else {
39d5492a 7148 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7149 }
7150 rd = (insn >> 12) & 0xf;
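        /* Editorial note: op1 is the standard ARM data-processing opcode:
         * 0 AND, 1 EOR, 2 SUB, 3 RSB, 4 ADD, 5 ADC, 6 SBC, 7 RSC,
         * 8 TST, 9 TEQ, a CMP, b CMN, c ORR, d MOV, e BIC, f MVN. */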
7151 switch(op1) {
7152 case 0x00:
e9bb4aa9
JR
7153 tcg_gen_and_i32(tmp, tmp, tmp2);
7154 if (logic_cc) {
7155 gen_logic_CC(tmp);
7156 }
21aeb343 7157 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7158 break;
7159 case 0x01:
e9bb4aa9
JR
7160 tcg_gen_xor_i32(tmp, tmp, tmp2);
7161 if (logic_cc) {
7162 gen_logic_CC(tmp);
7163 }
21aeb343 7164 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7165 break;
7166 case 0x02:
7167 if (set_cc && rd == 15) {
7168 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7169 if (IS_USER(s)) {
9ee6e8bb 7170 goto illegal_op;
e9bb4aa9 7171 }
72485ec4 7172 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7173 gen_exception_return(s, tmp);
9ee6e8bb 7174 } else {
e9bb4aa9 7175 if (set_cc) {
72485ec4 7176 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7177 } else {
7178 tcg_gen_sub_i32(tmp, tmp, tmp2);
7179 }
21aeb343 7180 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7181 }
7182 break;
7183 case 0x03:
e9bb4aa9 7184 if (set_cc) {
72485ec4 7185 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7186 } else {
7187 tcg_gen_sub_i32(tmp, tmp2, tmp);
7188 }
21aeb343 7189 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7190 break;
7191 case 0x04:
e9bb4aa9 7192 if (set_cc) {
72485ec4 7193 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7194 } else {
7195 tcg_gen_add_i32(tmp, tmp, tmp2);
7196 }
21aeb343 7197 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7198 break;
7199 case 0x05:
e9bb4aa9 7200 if (set_cc) {
49b4c31e 7201 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7202 } else {
7203 gen_add_carry(tmp, tmp, tmp2);
7204 }
21aeb343 7205 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7206 break;
7207 case 0x06:
e9bb4aa9 7208 if (set_cc) {
2de68a49 7209 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7210 } else {
7211 gen_sub_carry(tmp, tmp, tmp2);
7212 }
21aeb343 7213 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7214 break;
7215 case 0x07:
e9bb4aa9 7216 if (set_cc) {
2de68a49 7217 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7218 } else {
7219 gen_sub_carry(tmp, tmp2, tmp);
7220 }
21aeb343 7221 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7222 break;
7223 case 0x08:
7224 if (set_cc) {
e9bb4aa9
JR
7225 tcg_gen_and_i32(tmp, tmp, tmp2);
7226 gen_logic_CC(tmp);
9ee6e8bb 7227 }
7d1b0095 7228 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7229 break;
7230 case 0x09:
7231 if (set_cc) {
e9bb4aa9
JR
7232 tcg_gen_xor_i32(tmp, tmp, tmp2);
7233 gen_logic_CC(tmp);
9ee6e8bb 7234 }
7d1b0095 7235 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7236 break;
7237 case 0x0a:
7238 if (set_cc) {
72485ec4 7239 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7240 }
7d1b0095 7241 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7242 break;
7243 case 0x0b:
7244 if (set_cc) {
72485ec4 7245 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7246 }
7d1b0095 7247 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7248 break;
7249 case 0x0c:
e9bb4aa9
JR
7250 tcg_gen_or_i32(tmp, tmp, tmp2);
7251 if (logic_cc) {
7252 gen_logic_CC(tmp);
7253 }
21aeb343 7254 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7255 break;
7256 case 0x0d:
7257 if (logic_cc && rd == 15) {
7258 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7259 if (IS_USER(s)) {
9ee6e8bb 7260 goto illegal_op;
e9bb4aa9
JR
7261 }
7262 gen_exception_return(s, tmp2);
9ee6e8bb 7263 } else {
e9bb4aa9
JR
7264 if (logic_cc) {
7265 gen_logic_CC(tmp2);
7266 }
21aeb343 7267 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7268 }
7269 break;
7270 case 0x0e:
f669df27 7271 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7272 if (logic_cc) {
7273 gen_logic_CC(tmp);
7274 }
21aeb343 7275 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7276 break;
7277 default:
7278 case 0x0f:
e9bb4aa9
JR
7279 tcg_gen_not_i32(tmp2, tmp2);
7280 if (logic_cc) {
7281 gen_logic_CC(tmp2);
7282 }
21aeb343 7283 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7284 break;
7285 }
e9bb4aa9 7286 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7287 tcg_temp_free_i32(tmp2);
e9bb4aa9 7288 }
9ee6e8bb
PB
7289 } else {
7290 /* other instructions */
7291 op1 = (insn >> 24) & 0xf;
7292 switch(op1) {
7293 case 0x0:
7294 case 0x1:
7295 /* multiplies, extra load/stores */
7296 sh = (insn >> 5) & 3;
7297 if (sh == 0) {
7298 if (op1 == 0x0) {
7299 rd = (insn >> 16) & 0xf;
7300 rn = (insn >> 12) & 0xf;
7301 rs = (insn >> 8) & 0xf;
7302 rm = (insn) & 0xf;
7303 op1 = (insn >> 20) & 0xf;
7304 switch (op1) {
7305 case 0: case 1: case 2: case 3: case 6:
7306 /* 32 bit mul */
5e3f878a
PB
7307 tmp = load_reg(s, rs);
7308 tmp2 = load_reg(s, rm);
7309 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7310 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7311 if (insn & (1 << 22)) {
7312 /* Subtract (mls) */
7313 ARCH(6T2);
5e3f878a
PB
7314 tmp2 = load_reg(s, rn);
7315 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7316 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7317 } else if (insn & (1 << 21)) {
7318 /* Add */
5e3f878a
PB
7319 tmp2 = load_reg(s, rn);
7320 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7321 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7322 }
7323 if (insn & (1 << 20))
5e3f878a
PB
7324 gen_logic_CC(tmp);
7325 store_reg(s, rd, tmp);
9ee6e8bb 7326 break;
8aac08b1
AJ
7327 case 4:
7328 /* 64 bit mul double accumulate (UMAAL) */
7329 ARCH(6);
7330 tmp = load_reg(s, rs);
7331 tmp2 = load_reg(s, rm);
7332 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7333 gen_addq_lo(s, tmp64, rn);
7334 gen_addq_lo(s, tmp64, rd);
7335 gen_storeq_reg(s, rn, rd, tmp64);
7336 tcg_temp_free_i64(tmp64);
7337 break;
7338 case 8: case 9: case 10: case 11:
7339 case 12: case 13: case 14: case 15:
7340 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7341 tmp = load_reg(s, rs);
7342 tmp2 = load_reg(s, rm);
8aac08b1 7343 if (insn & (1 << 22)) {
c9f10124 7344 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7345 } else {
c9f10124 7346 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7347 }
7348 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7349 TCGv_i32 al = load_reg(s, rn);
7350 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7351 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7352 tcg_temp_free_i32(al);
7353 tcg_temp_free_i32(ah);
9ee6e8bb 7354 }
8aac08b1 7355 if (insn & (1 << 20)) {
c9f10124 7356 gen_logicq_cc(tmp, tmp2);
8aac08b1 7357 }
c9f10124
RH
7358 store_reg(s, rn, tmp);
7359 store_reg(s, rd, tmp2);
9ee6e8bb 7360 break;
8aac08b1
AJ
7361 default:
7362 goto illegal_op;
9ee6e8bb
PB
7363 }
7364 } else {
7365 rn = (insn >> 16) & 0xf;
7366 rd = (insn >> 12) & 0xf;
7367 if (insn & (1 << 23)) {
7368 /* load/store exclusive */
2359bf80 7369 int op2 = (insn >> 8) & 3;
86753403 7370 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7371
7372 switch (op2) {
7373 case 0: /* lda/stl */
7374 if (op1 == 1) {
7375 goto illegal_op;
7376 }
7377 ARCH(8);
7378 break;
7379 case 1: /* reserved */
7380 goto illegal_op;
7381 case 2: /* ldaex/stlex */
7382 ARCH(8);
7383 break;
7384 case 3: /* ldrex/strex */
7385 if (op1) {
7386 ARCH(6K);
7387 } else {
7388 ARCH(6);
7389 }
7390 break;
7391 }
7392
3174f8e9 7393 addr = tcg_temp_local_new_i32();
98a46317 7394 load_reg_var(s, addr, rn);
2359bf80
MR
7395
7396 /* Since the emulation does not have barriers,
7397 the acquire/release semantics need no special
7398 handling. */
7399 if (op2 == 0) {
7400 if (insn & (1 << 20)) {
7401 tmp = tcg_temp_new_i32();
7402 switch (op1) {
7403 case 0: /* lda */
08307563 7404 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7405 break;
7406 case 2: /* ldab */
08307563 7407 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7408 break;
7409 case 3: /* ldah */
08307563 7410 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7411 break;
7412 default:
7413 abort();
7414 }
7415 store_reg(s, rd, tmp);
7416 } else {
7417 rm = insn & 0xf;
7418 tmp = load_reg(s, rm);
7419 switch (op1) {
7420 case 0: /* stl */
08307563 7421 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7422 break;
7423 case 2: /* stlb */
08307563 7424 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7425 break;
7426 case 3: /* stlh */
08307563 7427 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7428 break;
7429 default:
7430 abort();
7431 }
7432 tcg_temp_free_i32(tmp);
7433 }
7434 } else if (insn & (1 << 20)) {
86753403
PB
7435 switch (op1) {
7436 case 0: /* ldrex */
426f5abc 7437 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7438 break;
7439 case 1: /* ldrexd */
426f5abc 7440 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7441 break;
7442 case 2: /* ldrexb */
426f5abc 7443 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7444 break;
7445 case 3: /* ldrexh */
426f5abc 7446 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7447 break;
7448 default:
7449 abort();
7450 }
9ee6e8bb
PB
7451 } else {
7452 rm = insn & 0xf;
86753403
PB
7453 switch (op1) {
7454 case 0: /* strex */
426f5abc 7455 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7456 break;
7457 case 1: /* strexd */
502e64fe 7458 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7459 break;
7460 case 2: /* strexb */
426f5abc 7461 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7462 break;
7463 case 3: /* strexh */
426f5abc 7464 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7465 break;
7466 default:
7467 abort();
7468 }
9ee6e8bb 7469 }
39d5492a 7470 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7471 } else {
7472 /* SWP instruction */
7473 rm = (insn) & 0xf;
7474
8984bd2e
PB
7475 /* ??? This is not really atomic. However we know
7476 we never have multiple CPUs running in parallel,
7477 so it is good enough. */
7478 addr = load_reg(s, rn);
7479 tmp = load_reg(s, rm);
5a839c0d 7480 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7481 if (insn & (1 << 22)) {
08307563
PM
7482 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7483 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7484 } else {
08307563
PM
7485 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7486 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7487 }
5a839c0d 7488 tcg_temp_free_i32(tmp);
7d1b0095 7489 tcg_temp_free_i32(addr);
8984bd2e 7490 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7491 }
7492 }
7493 } else {
7494 int address_offset;
7495 int load;
7496 /* Misc load/store */
7497 rn = (insn >> 16) & 0xf;
7498 rd = (insn >> 12) & 0xf;
b0109805 7499 addr = load_reg(s, rn);
9ee6e8bb 7500 if (insn & (1 << 24))
b0109805 7501 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7502 address_offset = 0;
7503 if (insn & (1 << 20)) {
7504 /* load */
5a839c0d 7505 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7506 switch(sh) {
7507 case 1:
08307563 7508 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7509 break;
7510 case 2:
08307563 7511 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7512 break;
7513 default:
7514 case 3:
08307563 7515 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7516 break;
7517 }
7518 load = 1;
7519 } else if (sh & 2) {
be5e7a76 7520 ARCH(5TE);
9ee6e8bb
PB
7521 /* doubleword */
7522 if (sh & 1) {
7523 /* store */
b0109805 7524 tmp = load_reg(s, rd);
08307563 7525 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7526 tcg_temp_free_i32(tmp);
b0109805
PB
7527 tcg_gen_addi_i32(addr, addr, 4);
7528 tmp = load_reg(s, rd + 1);
08307563 7529 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7530 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7531 load = 0;
7532 } else {
7533 /* load */
5a839c0d 7534 tmp = tcg_temp_new_i32();
08307563 7535 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7536 store_reg(s, rd, tmp);
7537 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7538 tmp = tcg_temp_new_i32();
08307563 7539 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7540 rd++;
7541 load = 1;
7542 }
7543 address_offset = -4;
7544 } else {
7545 /* store */
b0109805 7546 tmp = load_reg(s, rd);
08307563 7547 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 7548 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7549 load = 0;
7550 }
7551 /* Perform base writeback before the loaded value to
7552 ensure correct behavior with overlapping index registers.
7553 ldrd with base writeback is undefined if the
7554 destination and index registers overlap. */
7555 if (!(insn & (1 << 24))) {
b0109805
PB
7556 gen_add_datah_offset(s, insn, address_offset, addr);
7557 store_reg(s, rn, addr);
9ee6e8bb
PB
7558 } else if (insn & (1 << 21)) {
7559 if (address_offset)
b0109805
PB
7560 tcg_gen_addi_i32(addr, addr, address_offset);
7561 store_reg(s, rn, addr);
7562 } else {
7d1b0095 7563 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7564 }
7565 if (load) {
7566 /* Complete the load. */
b0109805 7567 store_reg(s, rd, tmp);
9ee6e8bb
PB
7568 }
7569 }
7570 break;
7571 case 0x4:
7572 case 0x5:
7573 goto do_ldst;
7574 case 0x6:
7575 case 0x7:
7576 if (insn & (1 << 4)) {
7577 ARCH(6);
7578 /* ARMv6 Media instructions. */
7579 rm = insn & 0xf;
7580 rn = (insn >> 16) & 0xf;
2c0262af 7581 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7582 rs = (insn >> 8) & 0xf;
7583 switch ((insn >> 23) & 3) {
7584 case 0: /* Parallel add/subtract. */
7585 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7586 tmp = load_reg(s, rn);
7587 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7588 sh = (insn >> 5) & 7;
7589 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7590 goto illegal_op;
6ddbc6e4 7591 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7592 tcg_temp_free_i32(tmp2);
6ddbc6e4 7593 store_reg(s, rd, tmp);
9ee6e8bb
PB
7594 break;
7595 case 1:
7596 if ((insn & 0x00700020) == 0) {
6c95676b 7597 /* Halfword pack. */
3670669c
PB
7598 tmp = load_reg(s, rn);
7599 tmp2 = load_reg(s, rm);
9ee6e8bb 7600 shift = (insn >> 7) & 0x1f;
3670669c
PB
7601 if (insn & (1 << 6)) {
7602 /* pkhtb */
22478e79
AZ
7603 if (shift == 0)
7604 shift = 31;
7605 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7606 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7607 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7608 } else {
7609 /* pkhbt */
22478e79
AZ
7610 if (shift)
7611 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7612 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7613 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7614 }
7615 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7616 tcg_temp_free_i32(tmp2);
3670669c 7617 store_reg(s, rd, tmp);
9ee6e8bb
PB
7618 } else if ((insn & 0x00200020) == 0x00200000) {
7619 /* [us]sat */
6ddbc6e4 7620 tmp = load_reg(s, rm);
9ee6e8bb
PB
7621 shift = (insn >> 7) & 0x1f;
7622 if (insn & (1 << 6)) {
7623 if (shift == 0)
7624 shift = 31;
6ddbc6e4 7625 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7626 } else {
6ddbc6e4 7627 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7628 }
7629 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7630 tmp2 = tcg_const_i32(sh);
7631 if (insn & (1 << 22))
9ef39277 7632 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7633 else
9ef39277 7634 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7635 tcg_temp_free_i32(tmp2);
6ddbc6e4 7636 store_reg(s, rd, tmp);
9ee6e8bb
PB
7637 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7638 /* [us]sat16 */
6ddbc6e4 7639 tmp = load_reg(s, rm);
9ee6e8bb 7640 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7641 tmp2 = tcg_const_i32(sh);
7642 if (insn & (1 << 22))
9ef39277 7643 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7644 else
9ef39277 7645 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7646 tcg_temp_free_i32(tmp2);
6ddbc6e4 7647 store_reg(s, rd, tmp);
9ee6e8bb
PB
7648 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7649 /* Select bytes. */
6ddbc6e4
PB
7650 tmp = load_reg(s, rn);
7651 tmp2 = load_reg(s, rm);
7d1b0095 7652 tmp3 = tcg_temp_new_i32();
0ecb72a5 7653 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7654 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7655 tcg_temp_free_i32(tmp3);
7656 tcg_temp_free_i32(tmp2);
6ddbc6e4 7657 store_reg(s, rd, tmp);
9ee6e8bb 7658 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7659 tmp = load_reg(s, rm);
9ee6e8bb 7660 shift = (insn >> 10) & 3;
1301f322 7661 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7662 rotate, a shift is sufficient. */
7663 if (shift != 0)
f669df27 7664 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7665 op1 = (insn >> 20) & 7;
7666 switch (op1) {
5e3f878a
PB
7667 case 0: gen_sxtb16(tmp); break;
7668 case 2: gen_sxtb(tmp); break;
7669 case 3: gen_sxth(tmp); break;
7670 case 4: gen_uxtb16(tmp); break;
7671 case 6: gen_uxtb(tmp); break;
7672 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7673 default: goto illegal_op;
7674 }
7675 if (rn != 15) {
5e3f878a 7676 tmp2 = load_reg(s, rn);
9ee6e8bb 7677 if ((op1 & 3) == 0) {
5e3f878a 7678 gen_add16(tmp, tmp2);
9ee6e8bb 7679 } else {
5e3f878a 7680 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7681 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7682 }
7683 }
6c95676b 7684 store_reg(s, rd, tmp);
9ee6e8bb
PB
7685 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7686 /* rev */
b0109805 7687 tmp = load_reg(s, rm);
9ee6e8bb
PB
7688 if (insn & (1 << 22)) {
7689 if (insn & (1 << 7)) {
b0109805 7690 gen_revsh(tmp);
9ee6e8bb
PB
7691 } else {
7692 ARCH(6T2);
b0109805 7693 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7694 }
7695 } else {
7696 if (insn & (1 << 7))
b0109805 7697 gen_rev16(tmp);
9ee6e8bb 7698 else
66896cb8 7699 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7700 }
b0109805 7701 store_reg(s, rd, tmp);
9ee6e8bb
PB
7702 } else {
7703 goto illegal_op;
7704 }
7705 break;
7706 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7707 switch ((insn >> 20) & 0x7) {
7708 case 5:
7709 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7710 /* op2 not 00x or 11x : UNDEF */
7711 goto illegal_op;
7712 }
838fa72d
AJ
7713 /* Signed multiply most significant [accumulate].
7714 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7715 tmp = load_reg(s, rm);
7716 tmp2 = load_reg(s, rs);
a7812ae4 7717 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7718
955a7dd5 7719 if (rd != 15) {
838fa72d 7720 tmp = load_reg(s, rd);
9ee6e8bb 7721 if (insn & (1 << 6)) {
838fa72d 7722 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7723 } else {
838fa72d 7724 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7725 }
7726 }
838fa72d
AJ
7727 if (insn & (1 << 5)) {
7728 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7729 }
7730 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7731 tmp = tcg_temp_new_i32();
838fa72d
AJ
7732 tcg_gen_trunc_i64_i32(tmp, tmp64);
7733 tcg_temp_free_i64(tmp64);
955a7dd5 7734 store_reg(s, rn, tmp);
41e9564d
PM
7735 break;
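/* Editor's note: an illustrative sketch, not part of translate.c.  The
 * SMMUL/SMMLA/SMMLS case above keeps only the most significant word of a
 * signed 32x32->64 multiply (SMMLA/SMMLS first add or subtract the
 * accumulator into the high half), and the "R" variants round by adding
 * bit 31 before the final shift.  The helper name is made up; assumes
 * <stdint.h> and <stdbool.h>.
 */
static int32_t smmul_sketch(int32_t rn, int32_t rm, bool round)
{
    int64_t prod = (int64_t)rn * rm;
    if (round) {
        prod += 0x80000000u;       /* SMMULR: round instead of truncate */
    }
    return (int32_t)(prod >> 32);  /* keep only the most significant word */
}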
7736 case 0:
7737 case 4:
7738 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7739 if (insn & (1 << 7)) {
7740 goto illegal_op;
7741 }
7742 tmp = load_reg(s, rm);
7743 tmp2 = load_reg(s, rs);
9ee6e8bb 7744 if (insn & (1 << 5))
5e3f878a
PB
7745 gen_swap_half(tmp2);
7746 gen_smul_dual(tmp, tmp2);
5e3f878a 7747 if (insn & (1 << 6)) {
e1d177b9 7748 /* This subtraction cannot overflow. */
5e3f878a
PB
7749 tcg_gen_sub_i32(tmp, tmp, tmp2);
7750 } else {
e1d177b9
PM
7751 /* This addition cannot overflow 32 bits;
7752 * however it may overflow when considered as a signed
7753 * operation, in which case we must set the Q flag.
7754 */
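/* Editor's note (concrete example): if both halfwords of both operands are
 * 0x8000, each 16x16 product is 0x40000000 and the sum is 0x80000000, which
 * reads back as a negative signed result even though no bits were lost, so
 * the Q flag must be set.
 */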
9ef39277 7755 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7756 }
7d1b0095 7757 tcg_temp_free_i32(tmp2);
9ee6e8bb 7758 if (insn & (1 << 22)) {
5e3f878a 7759 /* smlald, smlsld */
a7812ae4
PB
7760 tmp64 = tcg_temp_new_i64();
7761 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7762 tcg_temp_free_i32(tmp);
a7812ae4
PB
7763 gen_addq(s, tmp64, rd, rn);
7764 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7765 tcg_temp_free_i64(tmp64);
9ee6e8bb 7766 } else {
5e3f878a 7767 /* smuad, smusd, smlad, smlsd */
22478e79 7768 if (rd != 15)
9ee6e8bb 7769 {
22478e79 7770 tmp2 = load_reg(s, rd);
9ef39277 7771 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7772 tcg_temp_free_i32(tmp2);
9ee6e8bb 7773 }
22478e79 7774 store_reg(s, rn, tmp);
9ee6e8bb 7775 }
41e9564d 7776 break;
b8b8ea05
PM
7777 case 1:
7778 case 3:
7779 /* SDIV, UDIV */
7780 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7781 goto illegal_op;
7782 }
7783 if (((insn >> 5) & 7) || (rd != 15)) {
7784 goto illegal_op;
7785 }
7786 tmp = load_reg(s, rm);
7787 tmp2 = load_reg(s, rs);
7788 if (insn & (1 << 21)) {
7789 gen_helper_udiv(tmp, tmp, tmp2);
7790 } else {
7791 gen_helper_sdiv(tmp, tmp, tmp2);
7792 }
7793 tcg_temp_free_i32(tmp2);
7794 store_reg(s, rn, tmp);
7795 break;
41e9564d
PM
7796 default:
7797 goto illegal_op;
9ee6e8bb
PB
7798 }
7799 break;
7800 case 3:
7801 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7802 switch (op1) {
7803 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7804 ARCH(6);
7805 tmp = load_reg(s, rm);
7806 tmp2 = load_reg(s, rs);
7807 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7808 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7809 if (rd != 15) {
7810 tmp2 = load_reg(s, rd);
6ddbc6e4 7811 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7812 tcg_temp_free_i32(tmp2);
9ee6e8bb 7813 }
ded9d295 7814 store_reg(s, rn, tmp);
9ee6e8bb
PB
7815 break;
7816 case 0x20: case 0x24: case 0x28: case 0x2c:
7817 /* Bitfield insert/clear. */
7818 ARCH(6T2);
7819 shift = (insn >> 7) & 0x1f;
7820 i = (insn >> 16) & 0x1f;
7821 i = i + 1 - shift;
7822 if (rm == 15) {
7d1b0095 7823 tmp = tcg_temp_new_i32();
5e3f878a 7824 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7825 } else {
5e3f878a 7826 tmp = load_reg(s, rm);
9ee6e8bb
PB
7827 }
7828 if (i != 32) {
5e3f878a 7829 tmp2 = load_reg(s, rd);
d593c48e 7830 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7831 tcg_temp_free_i32(tmp2);
9ee6e8bb 7832 }
5e3f878a 7833 store_reg(s, rd, tmp);
9ee6e8bb
PB
7834 break;
7835 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7836 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7837 ARCH(6T2);
5e3f878a 7838 tmp = load_reg(s, rm);
9ee6e8bb
PB
7839 shift = (insn >> 7) & 0x1f;
7840 i = ((insn >> 16) & 0x1f) + 1;
7841 if (shift + i > 32)
7842 goto illegal_op;
7843 if (i < 32) {
7844 if (op1 & 0x20) {
5e3f878a 7845 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7846 } else {
5e3f878a 7847 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7848 }
7849 }
5e3f878a 7850 store_reg(s, rd, tmp);
9ee6e8bb
PB
7851 break;
7852 default:
7853 goto illegal_op;
7854 }
7855 break;
7856 }
7857 break;
7858 }
7859 do_ldst:
7860 /* Check for undefined extension instructions
7861 * per the ARM Bible, i.e.:
7862 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7863 */
7864 sh = (0xf << 20) | (0xf << 4);
7865 if (op1 == 0x7 && ((insn & sh) == sh))
7866 {
7867 goto illegal_op;
7868 }
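/* Editor's note: op1 == 0x7 pins insn[27:24] to 0111, and sh == 0x00f000f0
 * requires insn[23:20] and insn[7:4] to be all ones, which is exactly the
 * pattern quoted above.
 */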
7869 /* load/store byte/word */
7870 rn = (insn >> 16) & 0xf;
7871 rd = (insn >> 12) & 0xf;
b0109805 7872 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7873 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7874 if (insn & (1 << 24))
b0109805 7875 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7876 if (insn & (1 << 20)) {
7877 /* load */
5a839c0d 7878 tmp = tcg_temp_new_i32();
9ee6e8bb 7879 if (insn & (1 << 22)) {
08307563 7880 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 7881 } else {
08307563 7882 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 7883 }
9ee6e8bb
PB
7884 } else {
7885 /* store */
b0109805 7886 tmp = load_reg(s, rd);
5a839c0d 7887 if (insn & (1 << 22)) {
08307563 7888 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 7889 } else {
08307563 7890 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
7891 }
7892 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7893 }
7894 if (!(insn & (1 << 24))) {
b0109805
PB
7895 gen_add_data_offset(s, insn, tmp2);
7896 store_reg(s, rn, tmp2);
7897 } else if (insn & (1 << 21)) {
7898 store_reg(s, rn, tmp2);
7899 } else {
7d1b0095 7900 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7901 }
7902 if (insn & (1 << 20)) {
7903 /* Complete the load. */
be5e7a76 7904 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7905 }
7906 break;
7907 case 0x08:
7908 case 0x09:
7909 {
7910 int j, n, user, loaded_base;
39d5492a 7911 TCGv_i32 loaded_var;
9ee6e8bb
PB
7912 /* load/store multiple words */
7913 /* XXX: store correct base if write back */
7914 user = 0;
7915 if (insn & (1 << 22)) {
7916 if (IS_USER(s))
7917 goto illegal_op; /* only usable in supervisor mode */
7918
7919 if ((insn & (1 << 15)) == 0)
7920 user = 1;
7921 }
7922 rn = (insn >> 16) & 0xf;
b0109805 7923 addr = load_reg(s, rn);
9ee6e8bb
PB
7924
7925 /* compute total size */
7926 loaded_base = 0;
39d5492a 7927 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7928 n = 0;
7929 for(i=0;i<16;i++) {
7930 if (insn & (1 << i))
7931 n++;
7932 }
7933 /* XXX: test invalid n == 0 case ? */
7934 if (insn & (1 << 23)) {
7935 if (insn & (1 << 24)) {
7936 /* pre increment */
b0109805 7937 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7938 } else {
7939 /* post increment */
7940 }
7941 } else {
7942 if (insn & (1 << 24)) {
7943 /* pre decrement */
b0109805 7944 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7945 } else {
7946 /* post decrement */
7947 if (n != 1)
b0109805 7948 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7949 }
7950 }
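/* Editor's note: the adjustments above yield the first transfer address
 * for the four LDM/STM addressing modes (n = number of registers):
 *   IA: Rn            IB: Rn + 4
 *   DA: Rn - 4*(n-1)  DB: Rn - 4*n
 * after which each successive transfer steps upwards by 4.
 */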
7951 j = 0;
7952 for(i=0;i<16;i++) {
7953 if (insn & (1 << i)) {
7954 if (insn & (1 << 20)) {
7955 /* load */
5a839c0d 7956 tmp = tcg_temp_new_i32();
08307563 7957 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7958 if (user) {
b75263d6 7959 tmp2 = tcg_const_i32(i);
1ce94f81 7960 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7961 tcg_temp_free_i32(tmp2);
7d1b0095 7962 tcg_temp_free_i32(tmp);
9ee6e8bb 7963 } else if (i == rn) {
b0109805 7964 loaded_var = tmp;
9ee6e8bb
PB
7965 loaded_base = 1;
7966 } else {
be5e7a76 7967 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7968 }
7969 } else {
7970 /* store */
7971 if (i == 15) {
7972 /* special case: r15 = PC + 8 */
7973 val = (long)s->pc + 4;
7d1b0095 7974 tmp = tcg_temp_new_i32();
b0109805 7975 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7976 } else if (user) {
7d1b0095 7977 tmp = tcg_temp_new_i32();
b75263d6 7978 tmp2 = tcg_const_i32(i);
9ef39277 7979 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7980 tcg_temp_free_i32(tmp2);
9ee6e8bb 7981 } else {
b0109805 7982 tmp = load_reg(s, i);
9ee6e8bb 7983 }
08307563 7984 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7985 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7986 }
7987 j++;
7988 /* no need to add after the last transfer */
7989 if (j != n)
b0109805 7990 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7991 }
7992 }
7993 if (insn & (1 << 21)) {
7994 /* write back */
7995 if (insn & (1 << 23)) {
7996 if (insn & (1 << 24)) {
7997 /* pre increment */
7998 } else {
7999 /* post increment */
b0109805 8000 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8001 }
8002 } else {
8003 if (insn & (1 << 24)) {
8004 /* pre decrement */
8005 if (n != 1)
b0109805 8006 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8007 } else {
8008 /* post decrement */
b0109805 8009 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8010 }
8011 }
b0109805
PB
8012 store_reg(s, rn, addr);
8013 } else {
7d1b0095 8014 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8015 }
8016 if (loaded_base) {
b0109805 8017 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8018 }
8019 if ((insn & (1 << 22)) && !user) {
8020 /* Restore CPSR from SPSR. */
d9ba4830
PB
8021 tmp = load_cpu_field(spsr);
8022 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8023 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8024 s->is_jmp = DISAS_UPDATE;
8025 }
8026 }
8027 break;
8028 case 0xa:
8029 case 0xb:
8030 {
8031 int32_t offset;
8032
8033 /* branch (and link) */
8034 val = (int32_t)s->pc;
8035 if (insn & (1 << 24)) {
7d1b0095 8036 tmp = tcg_temp_new_i32();
5e3f878a
PB
8037 tcg_gen_movi_i32(tmp, val);
8038 store_reg(s, 14, tmp);
9ee6e8bb 8039 }
534df156
PM
8040 offset = sextract32(insn << 2, 0, 26);
8041 val += offset + 4;
9ee6e8bb
PB
8042 gen_jmp(s, val);
8043 }
8044 break;
8045 case 0xc:
8046 case 0xd:
8047 case 0xe:
6a57f3eb
WN
8048 if (((insn >> 8) & 0xe) == 10) {
8049 /* VFP. */
8050 if (disas_vfp_insn(env, s, insn)) {
8051 goto illegal_op;
8052 }
8053 } else if (disas_coproc_insn(env, s, insn)) {
8054 /* Coprocessor. */
9ee6e8bb 8055 goto illegal_op;
6a57f3eb 8056 }
9ee6e8bb
PB
8057 break;
8058 case 0xf:
8059 /* swi */
eaed129d 8060 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8061 s->is_jmp = DISAS_SWI;
8062 break;
8063 default:
8064 illegal_op:
bc4a0de0 8065 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8066 break;
8067 }
8068 }
8069}
8070
8071/* Return true if this is a Thumb-2 logical op. */
8072static int
8073thumb2_logic_op(int op)
8074{
8075 return (op < 8);
8076}
8077
8078/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8079 then set condition code flags based on the result of the operation.
8080 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8081 to the high bit of T1.
8082 Returns zero if the opcode is valid. */
8083
8084static int
39d5492a
PM
8085gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8086 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8087{
8088 int logic_cc;
8089
8090 logic_cc = 0;
8091 switch (op) {
8092 case 0: /* and */
396e467c 8093 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8094 logic_cc = conds;
8095 break;
8096 case 1: /* bic */
f669df27 8097 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8098 logic_cc = conds;
8099 break;
8100 case 2: /* orr */
396e467c 8101 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8102 logic_cc = conds;
8103 break;
8104 case 3: /* orn */
29501f1b 8105 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8106 logic_cc = conds;
8107 break;
8108 case 4: /* eor */
396e467c 8109 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8110 logic_cc = conds;
8111 break;
8112 case 8: /* add */
8113 if (conds)
72485ec4 8114 gen_add_CC(t0, t0, t1);
9ee6e8bb 8115 else
396e467c 8116 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8117 break;
8118 case 10: /* adc */
8119 if (conds)
49b4c31e 8120 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8121 else
396e467c 8122 gen_adc(t0, t1);
9ee6e8bb
PB
8123 break;
8124 case 11: /* sbc */
2de68a49
RH
8125 if (conds) {
8126 gen_sbc_CC(t0, t0, t1);
8127 } else {
396e467c 8128 gen_sub_carry(t0, t0, t1);
2de68a49 8129 }
9ee6e8bb
PB
8130 break;
8131 case 13: /* sub */
8132 if (conds)
72485ec4 8133 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8134 else
396e467c 8135 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8136 break;
8137 case 14: /* rsb */
8138 if (conds)
72485ec4 8139 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8140 else
396e467c 8141 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8142 break;
8143 default: /* 5, 6, 7, 9, 12, 15. */
8144 return 1;
8145 }
8146 if (logic_cc) {
396e467c 8147 gen_logic_CC(t0);
9ee6e8bb 8148 if (shifter_out)
396e467c 8149 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8150 }
8151 return 0;
8152}
8153
8154/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8155 is not legal. */
0ecb72a5 8156static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8157{
b0109805 8158 uint32_t insn, imm, shift, offset;
9ee6e8bb 8159 uint32_t rd, rn, rm, rs;
39d5492a
PM
8160 TCGv_i32 tmp;
8161 TCGv_i32 tmp2;
8162 TCGv_i32 tmp3;
8163 TCGv_i32 addr;
a7812ae4 8164 TCGv_i64 tmp64;
9ee6e8bb
PB
8165 int op;
8166 int shiftop;
8167 int conds;
8168 int logic_cc;
8169
8170 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8171 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8172 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8173 16-bit instructions to get correct prefetch abort behavior. */
8174 insn = insn_hw1;
8175 if ((insn & (1 << 12)) == 0) {
be5e7a76 8176 ARCH(5);
9ee6e8bb
PB
8177 /* Second half of blx. */
8178 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8179 tmp = load_reg(s, 14);
8180 tcg_gen_addi_i32(tmp, tmp, offset);
8181 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8182
7d1b0095 8183 tmp2 = tcg_temp_new_i32();
b0109805 8184 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8185 store_reg(s, 14, tmp2);
8186 gen_bx(s, tmp);
9ee6e8bb
PB
8187 return 0;
8188 }
8189 if (insn & (1 << 11)) {
8190 /* Second half of bl. */
8191 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8192 tmp = load_reg(s, 14);
6a0d8a1d 8193 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8194
7d1b0095 8195 tmp2 = tcg_temp_new_i32();
b0109805 8196 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8197 store_reg(s, 14, tmp2);
8198 gen_bx(s, tmp);
9ee6e8bb
PB
8199 return 0;
8200 }
8201 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8202 /* Instruction spans a page boundary. Implement it as two
8203 16-bit instructions in case the second half causes a
8204 prefetch abort. */
8205 offset = ((int32_t)insn << 21) >> 9;
396e467c 8206 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8207 return 0;
8208 }
8209 /* Fall through to 32-bit decode. */
8210 }
8211
d31dd73e 8212 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8213 s->pc += 2;
8214 insn |= (uint32_t)insn_hw1 << 16;
8215
8216 if ((insn & 0xf800e800) != 0xf000e800) {
8217 ARCH(6T2);
8218 }
8219
8220 rn = (insn >> 16) & 0xf;
8221 rs = (insn >> 12) & 0xf;
8222 rd = (insn >> 8) & 0xf;
8223 rm = insn & 0xf;
8224 switch ((insn >> 25) & 0xf) {
8225 case 0: case 1: case 2: case 3:
8226 /* 16-bit instructions. Should never happen. */
8227 abort();
8228 case 4:
8229 if (insn & (1 << 22)) {
8230 /* Other load/store, table branch. */
8231 if (insn & 0x01200000) {
8232 /* Load/store doubleword. */
8233 if (rn == 15) {
7d1b0095 8234 addr = tcg_temp_new_i32();
b0109805 8235 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8236 } else {
b0109805 8237 addr = load_reg(s, rn);
9ee6e8bb
PB
8238 }
8239 offset = (insn & 0xff) * 4;
8240 if ((insn & (1 << 23)) == 0)
8241 offset = -offset;
8242 if (insn & (1 << 24)) {
b0109805 8243 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8244 offset = 0;
8245 }
8246 if (insn & (1 << 20)) {
8247 /* ldrd */
e2592fad 8248 tmp = tcg_temp_new_i32();
08307563 8249 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8250 store_reg(s, rs, tmp);
8251 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8252 tmp = tcg_temp_new_i32();
08307563 8253 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8254 store_reg(s, rd, tmp);
9ee6e8bb
PB
8255 } else {
8256 /* strd */
b0109805 8257 tmp = load_reg(s, rs);
08307563 8258 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8259 tcg_temp_free_i32(tmp);
b0109805
PB
8260 tcg_gen_addi_i32(addr, addr, 4);
8261 tmp = load_reg(s, rd);
08307563 8262 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8263 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8264 }
8265 if (insn & (1 << 21)) {
8266 /* Base writeback. */
8267 if (rn == 15)
8268 goto illegal_op;
b0109805
PB
8269 tcg_gen_addi_i32(addr, addr, offset - 4);
8270 store_reg(s, rn, addr);
8271 } else {
7d1b0095 8272 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8273 }
8274 } else if ((insn & (1 << 23)) == 0) {
8275 /* Load/store exclusive word. */
39d5492a 8276 addr = tcg_temp_local_new_i32();
98a46317 8277 load_reg_var(s, addr, rn);
426f5abc 8278 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8279 if (insn & (1 << 20)) {
426f5abc 8280 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8281 } else {
426f5abc 8282 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8283 }
39d5492a 8284 tcg_temp_free_i32(addr);
2359bf80 8285 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8286 /* Table Branch. */
8287 if (rn == 15) {
7d1b0095 8288 addr = tcg_temp_new_i32();
b0109805 8289 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8290 } else {
b0109805 8291 addr = load_reg(s, rn);
9ee6e8bb 8292 }
b26eefb6 8293 tmp = load_reg(s, rm);
b0109805 8294 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8295 if (insn & (1 << 4)) {
8296 /* tbh */
b0109805 8297 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8298 tcg_temp_free_i32(tmp);
e2592fad 8299 tmp = tcg_temp_new_i32();
08307563 8300 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8301 } else { /* tbb */
7d1b0095 8302 tcg_temp_free_i32(tmp);
e2592fad 8303 tmp = tcg_temp_new_i32();
08307563 8304 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8305 }
7d1b0095 8306 tcg_temp_free_i32(addr);
b0109805
PB
8307 tcg_gen_shli_i32(tmp, tmp, 1);
8308 tcg_gen_addi_i32(tmp, tmp, s->pc);
8309 store_reg(s, 15, tmp);
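/* Editor's note, an illustrative summary of the table branch generated
 * above (s->pc already equals the architectural PC, i.e. this insn + 4):
 *   tbb: pc <- pc + 2 * *(uint8_t  *)(Rn + Rm)
 *   tbh: pc <- pc + 2 * *(uint16_t *)(Rn + 2 * Rm)
 */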
9ee6e8bb 8310 } else {
2359bf80 8311 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8312 op = (insn >> 4) & 0x3;
2359bf80
MR
8313 switch (op2) {
8314 case 0:
426f5abc 8315 goto illegal_op;
2359bf80
MR
8316 case 1:
8317 /* Load/store exclusive byte/halfword/doubleword */
8318 if (op == 2) {
8319 goto illegal_op;
8320 }
8321 ARCH(7);
8322 break;
8323 case 2:
8324 /* Load-acquire/store-release */
8325 if (op == 3) {
8326 goto illegal_op;
8327 }
8328 /* Fall through */
8329 case 3:
8330 /* Load-acquire/store-release exclusive */
8331 ARCH(8);
8332 break;
426f5abc 8333 }
39d5492a 8334 addr = tcg_temp_local_new_i32();
98a46317 8335 load_reg_var(s, addr, rn);
2359bf80
MR
8336 if (!(op2 & 1)) {
8337 if (insn & (1 << 20)) {
8338 tmp = tcg_temp_new_i32();
8339 switch (op) {
8340 case 0: /* ldab */
08307563 8341 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8342 break;
8343 case 1: /* ldah */
08307563 8344 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8345 break;
8346 case 2: /* lda */
08307563 8347 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8348 break;
8349 default:
8350 abort();
8351 }
8352 store_reg(s, rs, tmp);
8353 } else {
8354 tmp = load_reg(s, rs);
8355 switch (op) {
8356 case 0: /* stlb */
08307563 8357 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8358 break;
8359 case 1: /* stlh */
08307563 8360 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8361 break;
8362 case 2: /* stl */
08307563 8363 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8364 break;
8365 default:
8366 abort();
8367 }
8368 tcg_temp_free_i32(tmp);
8369 }
8370 } else if (insn & (1 << 20)) {
426f5abc 8371 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8372 } else {
426f5abc 8373 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8374 }
39d5492a 8375 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8376 }
8377 } else {
8378 /* Load/store multiple, RFE, SRS. */
8379 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8380 /* RFE, SRS: not available in user mode or on M profile */
8381 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8382 goto illegal_op;
00115976 8383 }
9ee6e8bb
PB
8384 if (insn & (1 << 20)) {
8385 /* rfe */
b0109805
PB
8386 addr = load_reg(s, rn);
8387 if ((insn & (1 << 24)) == 0)
8388 tcg_gen_addi_i32(addr, addr, -8);
8389 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8390 tmp = tcg_temp_new_i32();
08307563 8391 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8392 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8393 tmp2 = tcg_temp_new_i32();
08307563 8394 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8395 if (insn & (1 << 21)) {
8396 /* Base writeback. */
b0109805
PB
8397 if (insn & (1 << 24)) {
8398 tcg_gen_addi_i32(addr, addr, 4);
8399 } else {
8400 tcg_gen_addi_i32(addr, addr, -4);
8401 }
8402 store_reg(s, rn, addr);
8403 } else {
7d1b0095 8404 tcg_temp_free_i32(addr);
9ee6e8bb 8405 }
b0109805 8406 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8407 } else {
8408 /* srs */
81465888
PM
8409 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8410 insn & (1 << 21));
9ee6e8bb
PB
8411 }
8412 } else {
5856d44e 8413 int i, loaded_base = 0;
39d5492a 8414 TCGv_i32 loaded_var;
9ee6e8bb 8415 /* Load/store multiple. */
b0109805 8416 addr = load_reg(s, rn);
9ee6e8bb
PB
8417 offset = 0;
8418 for (i = 0; i < 16; i++) {
8419 if (insn & (1 << i))
8420 offset += 4;
8421 }
8422 if (insn & (1 << 24)) {
b0109805 8423 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8424 }
8425
39d5492a 8426 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8427 for (i = 0; i < 16; i++) {
8428 if ((insn & (1 << i)) == 0)
8429 continue;
8430 if (insn & (1 << 20)) {
8431 /* Load. */
e2592fad 8432 tmp = tcg_temp_new_i32();
08307563 8433 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8434 if (i == 15) {
b0109805 8435 gen_bx(s, tmp);
5856d44e
YO
8436 } else if (i == rn) {
8437 loaded_var = tmp;
8438 loaded_base = 1;
9ee6e8bb 8439 } else {
b0109805 8440 store_reg(s, i, tmp);
9ee6e8bb
PB
8441 }
8442 } else {
8443 /* Store. */
b0109805 8444 tmp = load_reg(s, i);
08307563 8445 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8446 tcg_temp_free_i32(tmp);
9ee6e8bb 8447 }
b0109805 8448 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8449 }
5856d44e
YO
8450 if (loaded_base) {
8451 store_reg(s, rn, loaded_var);
8452 }
9ee6e8bb
PB
8453 if (insn & (1 << 21)) {
8454 /* Base register writeback. */
8455 if (insn & (1 << 24)) {
b0109805 8456 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8457 }
8458 /* Fault if writeback register is in register list. */
8459 if (insn & (1 << rn))
8460 goto illegal_op;
b0109805
PB
8461 store_reg(s, rn, addr);
8462 } else {
7d1b0095 8463 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8464 }
8465 }
8466 }
8467 break;
2af9ab77
JB
8468 case 5:
8469
9ee6e8bb 8470 op = (insn >> 21) & 0xf;
2af9ab77
JB
8471 if (op == 6) {
8472 /* Halfword pack. */
8473 tmp = load_reg(s, rn);
8474 tmp2 = load_reg(s, rm);
8475 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8476 if (insn & (1 << 5)) {
8477 /* pkhtb */
8478 if (shift == 0)
8479 shift = 31;
8480 tcg_gen_sari_i32(tmp2, tmp2, shift);
8481 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8482 tcg_gen_ext16u_i32(tmp2, tmp2);
8483 } else {
8484 /* pkhbt */
8485 if (shift)
8486 tcg_gen_shli_i32(tmp2, tmp2, shift);
8487 tcg_gen_ext16u_i32(tmp, tmp);
8488 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8489 }
8490 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8491 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8492 store_reg(s, rd, tmp);
8493 } else {
2af9ab77
JB
8494 /* Data processing register constant shift. */
8495 if (rn == 15) {
7d1b0095 8496 tmp = tcg_temp_new_i32();
2af9ab77
JB
8497 tcg_gen_movi_i32(tmp, 0);
8498 } else {
8499 tmp = load_reg(s, rn);
8500 }
8501 tmp2 = load_reg(s, rm);
8502
8503 shiftop = (insn >> 4) & 3;
8504 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8505 conds = (insn & (1 << 20)) != 0;
8506 logic_cc = (conds && thumb2_logic_op(op));
8507 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8508 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8509 goto illegal_op;
7d1b0095 8510 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8511 if (rd != 15) {
8512 store_reg(s, rd, tmp);
8513 } else {
7d1b0095 8514 tcg_temp_free_i32(tmp);
2af9ab77 8515 }
3174f8e9 8516 }
9ee6e8bb
PB
8517 break;
8518 case 13: /* Misc data processing. */
8519 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8520 if (op < 4 && (insn & 0xf000) != 0xf000)
8521 goto illegal_op;
8522 switch (op) {
8523 case 0: /* Register controlled shift. */
8984bd2e
PB
8524 tmp = load_reg(s, rn);
8525 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8526 if ((insn & 0x70) != 0)
8527 goto illegal_op;
8528 op = (insn >> 21) & 3;
8984bd2e
PB
8529 logic_cc = (insn & (1 << 20)) != 0;
8530 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8531 if (logic_cc)
8532 gen_logic_CC(tmp);
21aeb343 8533 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8534 break;
8535 case 1: /* Sign/zero extend. */
5e3f878a 8536 tmp = load_reg(s, rm);
9ee6e8bb 8537 shift = (insn >> 4) & 3;
1301f322 8538 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8539 rotate, a shift is sufficient. */
8540 if (shift != 0)
f669df27 8541 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8542 op = (insn >> 20) & 7;
8543 switch (op) {
5e3f878a
PB
8544 case 0: gen_sxth(tmp); break;
8545 case 1: gen_uxth(tmp); break;
8546 case 2: gen_sxtb16(tmp); break;
8547 case 3: gen_uxtb16(tmp); break;
8548 case 4: gen_sxtb(tmp); break;
8549 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8550 default: goto illegal_op;
8551 }
8552 if (rn != 15) {
5e3f878a 8553 tmp2 = load_reg(s, rn);
9ee6e8bb 8554 if ((op >> 1) == 1) {
5e3f878a 8555 gen_add16(tmp, tmp2);
9ee6e8bb 8556 } else {
5e3f878a 8557 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8558 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8559 }
8560 }
5e3f878a 8561 store_reg(s, rd, tmp);
9ee6e8bb
PB
8562 break;
8563 case 2: /* SIMD add/subtract. */
8564 op = (insn >> 20) & 7;
8565 shift = (insn >> 4) & 7;
8566 if ((op & 3) == 3 || (shift & 3) == 3)
8567 goto illegal_op;
6ddbc6e4
PB
8568 tmp = load_reg(s, rn);
8569 tmp2 = load_reg(s, rm);
8570 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8571 tcg_temp_free_i32(tmp2);
6ddbc6e4 8572 store_reg(s, rd, tmp);
9ee6e8bb
PB
8573 break;
8574 case 3: /* Other data processing. */
8575 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8576 if (op < 4) {
8577 /* Saturating add/subtract. */
d9ba4830
PB
8578 tmp = load_reg(s, rn);
8579 tmp2 = load_reg(s, rm);
9ee6e8bb 8580 if (op & 1)
9ef39277 8581 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8582 if (op & 2)
9ef39277 8583 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8584 else
9ef39277 8585 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8586 tcg_temp_free_i32(tmp2);
9ee6e8bb 8587 } else {
d9ba4830 8588 tmp = load_reg(s, rn);
9ee6e8bb
PB
8589 switch (op) {
8590 case 0x0a: /* rbit */
d9ba4830 8591 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8592 break;
8593 case 0x08: /* rev */
66896cb8 8594 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8595 break;
8596 case 0x09: /* rev16 */
d9ba4830 8597 gen_rev16(tmp);
9ee6e8bb
PB
8598 break;
8599 case 0x0b: /* revsh */
d9ba4830 8600 gen_revsh(tmp);
9ee6e8bb
PB
8601 break;
8602 case 0x10: /* sel */
d9ba4830 8603 tmp2 = load_reg(s, rm);
7d1b0095 8604 tmp3 = tcg_temp_new_i32();
0ecb72a5 8605 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8606 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8607 tcg_temp_free_i32(tmp3);
8608 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8609 break;
8610 case 0x18: /* clz */
d9ba4830 8611 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8612 break;
8613 default:
8614 goto illegal_op;
8615 }
8616 }
d9ba4830 8617 store_reg(s, rd, tmp);
9ee6e8bb
PB
8618 break;
8619 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8620 op = (insn >> 4) & 0xf;
d9ba4830
PB
8621 tmp = load_reg(s, rn);
8622 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8623 switch ((insn >> 20) & 7) {
8624 case 0: /* 32 x 32 -> 32 */
d9ba4830 8625 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8626 tcg_temp_free_i32(tmp2);
9ee6e8bb 8627 if (rs != 15) {
d9ba4830 8628 tmp2 = load_reg(s, rs);
9ee6e8bb 8629 if (op)
d9ba4830 8630 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8631 else
d9ba4830 8632 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8633 tcg_temp_free_i32(tmp2);
9ee6e8bb 8634 }
9ee6e8bb
PB
8635 break;
8636 case 1: /* 16 x 16 -> 32 */
d9ba4830 8637 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8638 tcg_temp_free_i32(tmp2);
9ee6e8bb 8639 if (rs != 15) {
d9ba4830 8640 tmp2 = load_reg(s, rs);
9ef39277 8641 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8642 tcg_temp_free_i32(tmp2);
9ee6e8bb 8643 }
9ee6e8bb
PB
8644 break;
8645 case 2: /* Dual multiply add. */
8646 case 4: /* Dual multiply subtract. */
8647 if (op)
d9ba4830
PB
8648 gen_swap_half(tmp2);
8649 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8650 if (insn & (1 << 22)) {
e1d177b9 8651 /* This subtraction cannot overflow. */
d9ba4830 8652 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8653 } else {
e1d177b9
PM
8654 /* This addition cannot overflow 32 bits;
8655 * however it may overflow when considered as a signed
8656 * operation, in which case we must set the Q flag.
8657 */
9ef39277 8658 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8659 }
7d1b0095 8660 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8661 if (rs != 15)
8662 {
d9ba4830 8663 tmp2 = load_reg(s, rs);
9ef39277 8664 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8665 tcg_temp_free_i32(tmp2);
9ee6e8bb 8666 }
9ee6e8bb
PB
8667 break;
8668 case 3: /* 32 * 16 -> 32msb */
8669 if (op)
d9ba4830 8670 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8671 else
d9ba4830 8672 gen_sxth(tmp2);
a7812ae4
PB
8673 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8674 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8675 tmp = tcg_temp_new_i32();
a7812ae4 8676 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8677 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8678 if (rs != 15)
8679 {
d9ba4830 8680 tmp2 = load_reg(s, rs);
9ef39277 8681 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8682 tcg_temp_free_i32(tmp2);
9ee6e8bb 8683 }
9ee6e8bb 8684 break;
838fa72d
AJ
8685 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8686 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8687 if (rs != 15) {
838fa72d
AJ
8688 tmp = load_reg(s, rs);
8689 if (insn & (1 << 20)) {
8690 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8691 } else {
838fa72d 8692 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8693 }
2c0262af 8694 }
838fa72d
AJ
8695 if (insn & (1 << 4)) {
8696 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8697 }
8698 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8699 tmp = tcg_temp_new_i32();
838fa72d
AJ
8700 tcg_gen_trunc_i64_i32(tmp, tmp64);
8701 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8702 break;
8703 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8704 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8705 tcg_temp_free_i32(tmp2);
9ee6e8bb 8706 if (rs != 15) {
d9ba4830
PB
8707 tmp2 = load_reg(s, rs);
8708 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8709 tcg_temp_free_i32(tmp2);
5fd46862 8710 }
9ee6e8bb 8711 break;
2c0262af 8712 }
d9ba4830 8713 store_reg(s, rd, tmp);
2c0262af 8714 break;
9ee6e8bb
PB
8715 case 6: case 7: /* 64-bit multiply, Divide. */
8716 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8717 tmp = load_reg(s, rn);
8718 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8719 if ((op & 0x50) == 0x10) {
8720 /* sdiv, udiv */
47789990 8721 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8722 goto illegal_op;
47789990 8723 }
9ee6e8bb 8724 if (op & 0x20)
5e3f878a 8725 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8726 else
5e3f878a 8727 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8728 tcg_temp_free_i32(tmp2);
5e3f878a 8729 store_reg(s, rd, tmp);
9ee6e8bb
PB
8730 } else if ((op & 0xe) == 0xc) {
8731 /* Dual multiply accumulate long. */
8732 if (op & 1)
5e3f878a
PB
8733 gen_swap_half(tmp2);
8734 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8735 if (op & 0x10) {
5e3f878a 8736 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8737 } else {
5e3f878a 8738 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8739 }
7d1b0095 8740 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8741 /* BUGFIX */
8742 tmp64 = tcg_temp_new_i64();
8743 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8744 tcg_temp_free_i32(tmp);
a7812ae4
PB
8745 gen_addq(s, tmp64, rs, rd);
8746 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8747 tcg_temp_free_i64(tmp64);
2c0262af 8748 } else {
9ee6e8bb
PB
8749 if (op & 0x20) {
8750 /* Unsigned 64-bit multiply */
a7812ae4 8751 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8752 } else {
9ee6e8bb
PB
8753 if (op & 8) {
8754 /* smlalxy */
5e3f878a 8755 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8756 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8757 tmp64 = tcg_temp_new_i64();
8758 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8759 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8760 } else {
8761 /* Signed 64-bit multiply */
a7812ae4 8762 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8763 }
b5ff1b31 8764 }
9ee6e8bb
PB
8765 if (op & 4) {
8766 /* umaal */
a7812ae4
PB
8767 gen_addq_lo(s, tmp64, rs);
8768 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8769 } else if (op & 0x40) {
8770 /* 64-bit accumulate. */
a7812ae4 8771 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8772 }
a7812ae4 8773 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8774 tcg_temp_free_i64(tmp64);
5fd46862 8775 }
2c0262af 8776 break;
9ee6e8bb
PB
8777 }
8778 break;
8779 case 6: case 7: case 14: case 15:
8780 /* Coprocessor. */
8781 if (((insn >> 24) & 3) == 3) {
8782 /* Translate into the equivalent ARM encoding. */
f06053e3 8783 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8784 if (disas_neon_data_insn(env, s, insn))
8785 goto illegal_op;
6a57f3eb
WN
8786 } else if (((insn >> 8) & 0xe) == 10) {
8787 if (disas_vfp_insn(env, s, insn)) {
8788 goto illegal_op;
8789 }
9ee6e8bb
PB
8790 } else {
8791 if (insn & (1 << 28))
8792 goto illegal_op;
8793 if (disas_coproc_insn (env, s, insn))
8794 goto illegal_op;
8795 }
8796 break;
8797 case 8: case 9: case 10: case 11:
8798 if (insn & (1 << 15)) {
8799 /* Branches, misc control. */
8800 if (insn & 0x5000) {
8801 /* Unconditional branch. */
8802 /* signextend(hw1[10:0]) -> offset[:12]. */
8803 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8804 /* hw2[10:0] -> offset[11:1]. */
8805 offset |= (insn & 0x7ff) << 1;
8806 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8807 offset[24:22] already have the same value because of the
8808 sign extension above. */
8809 offset ^= ((~insn) & (1 << 13)) << 10;
8810 offset ^= ((~insn) & (1 << 11)) << 11;
8811
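/* Editor's note: an illustrative sketch, not part of translate.c, of the
 * same 25-bit offset written the way the architecture defines it, with
 * I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S); hw1/hw2 stand for the two
 * halfwords of the instruction.  Assumes <stdint.h>.
 */
static int32_t t32_branch_offset_sketch(uint32_t hw1, uint32_t hw2)
{
    uint32_t s  = (hw1 >> 10) & 1;           /* S  = hw1[10] */
    uint32_t i1 = (~((hw2 >> 13) ^ s)) & 1;  /* I1 = NOT(J1 XOR S) */
    uint32_t i2 = (~((hw2 >> 11) ^ s)) & 1;  /* I2 = NOT(J2 XOR S) */
    uint32_t imm = (s << 24) | (i1 << 23) | (i2 << 22)
                 | ((hw1 & 0x3ff) << 12)     /* imm10 -> offset[21:12] */
                 | ((hw2 & 0x7ff) << 1);     /* imm11 -> offset[11:1] */
    return ((int32_t)(imm << 7)) >> 7;       /* sign extend from bit 24 */
}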
9ee6e8bb
PB
8812 if (insn & (1 << 14)) {
8813 /* Branch and link. */
3174f8e9 8814 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8815 }
3b46e624 8816
b0109805 8817 offset += s->pc;
9ee6e8bb
PB
8818 if (insn & (1 << 12)) {
8819 /* b/bl */
b0109805 8820 gen_jmp(s, offset);
9ee6e8bb
PB
8821 } else {
8822 /* blx */
b0109805 8823 offset &= ~(uint32_t)2;
be5e7a76 8824 /* thumb2 bx, no need to check */
b0109805 8825 gen_bx_im(s, offset);
2c0262af 8826 }
9ee6e8bb
PB
8827 } else if (((insn >> 23) & 7) == 7) {
8828 /* Misc control */
8829 if (insn & (1 << 13))
8830 goto illegal_op;
8831
8832 if (insn & (1 << 26)) {
8833 /* Secure monitor call (v6Z) */
e0c270d9
SW
8834 qemu_log_mask(LOG_UNIMP,
8835 "arm: unimplemented secure monitor call\n");
9ee6e8bb 8836 goto illegal_op; /* not implemented. */
2c0262af 8837 } else {
9ee6e8bb
PB
8838 op = (insn >> 20) & 7;
8839 switch (op) {
8840 case 0: /* msr cpsr. */
8841 if (IS_M(env)) {
8984bd2e
PB
8842 tmp = load_reg(s, rn);
8843 addr = tcg_const_i32(insn & 0xff);
8844 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8845 tcg_temp_free_i32(addr);
7d1b0095 8846 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8847 gen_lookup_tb(s);
8848 break;
8849 }
8850 /* fall through */
8851 case 1: /* msr spsr. */
8852 if (IS_M(env))
8853 goto illegal_op;
2fbac54b
FN
8854 tmp = load_reg(s, rn);
8855 if (gen_set_psr(s,
9ee6e8bb 8856 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8857 op == 1, tmp))
9ee6e8bb
PB
8858 goto illegal_op;
8859 break;
8860 case 2: /* cps, nop-hint. */
8861 if (((insn >> 8) & 7) == 0) {
8862 gen_nop_hint(s, insn & 0xff);
8863 }
8864 /* Implemented as NOP in user mode. */
8865 if (IS_USER(s))
8866 break;
8867 offset = 0;
8868 imm = 0;
8869 if (insn & (1 << 10)) {
8870 if (insn & (1 << 7))
8871 offset |= CPSR_A;
8872 if (insn & (1 << 6))
8873 offset |= CPSR_I;
8874 if (insn & (1 << 5))
8875 offset |= CPSR_F;
8876 if (insn & (1 << 9))
8877 imm = CPSR_A | CPSR_I | CPSR_F;
8878 }
8879 if (insn & (1 << 8)) {
8880 offset |= 0x1f;
8881 imm |= (insn & 0x1f);
8882 }
8883 if (offset) {
2fbac54b 8884 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8885 }
8886 break;
8887 case 3: /* Special control operations. */
426f5abc 8888 ARCH(7);
9ee6e8bb
PB
8889 op = (insn >> 4) & 0xf;
8890 switch (op) {
8891 case 2: /* clrex */
426f5abc 8892 gen_clrex(s);
9ee6e8bb
PB
8893 break;
8894 case 4: /* dsb */
8895 case 5: /* dmb */
8896 case 6: /* isb */
8897 /* These execute as NOPs. */
9ee6e8bb
PB
8898 break;
8899 default:
8900 goto illegal_op;
8901 }
8902 break;
8903 case 4: /* bxj */
8904 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8905 tmp = load_reg(s, rn);
8906 gen_bx(s, tmp);
9ee6e8bb
PB
8907 break;
8908 case 5: /* Exception return. */
b8b45b68
RV
8909 if (IS_USER(s)) {
8910 goto illegal_op;
8911 }
8912 if (rn != 14 || rd != 15) {
8913 goto illegal_op;
8914 }
8915 tmp = load_reg(s, rn);
8916 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8917 gen_exception_return(s, tmp);
8918 break;
9ee6e8bb 8919 case 6: /* mrs cpsr. */
7d1b0095 8920 tmp = tcg_temp_new_i32();
9ee6e8bb 8921 if (IS_M(env)) {
8984bd2e
PB
8922 addr = tcg_const_i32(insn & 0xff);
8923 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8924 tcg_temp_free_i32(addr);
9ee6e8bb 8925 } else {
9ef39277 8926 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8927 }
8984bd2e 8928 store_reg(s, rd, tmp);
9ee6e8bb
PB
8929 break;
8930 case 7: /* mrs spsr. */
8931 /* Not accessible in user mode. */
8932 if (IS_USER(s) || IS_M(env))
8933 goto illegal_op;
d9ba4830
PB
8934 tmp = load_cpu_field(spsr);
8935 store_reg(s, rd, tmp);
9ee6e8bb 8936 break;
2c0262af
FB
8937 }
8938 }
9ee6e8bb
PB
8939 } else {
8940 /* Conditional branch. */
8941 op = (insn >> 22) & 0xf;
8942 /* Generate a conditional jump to next instruction. */
8943 s->condlabel = gen_new_label();
d9ba4830 8944 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8945 s->condjmp = 1;
8946
8947 /* offset[11:1] = insn[10:0] */
8948 offset = (insn & 0x7ff) << 1;
8949 /* offset[17:12] = insn[21:16]. */
8950 offset |= (insn & 0x003f0000) >> 4;
8951 /* offset[31:20] = insn[26]. */
8952 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8953 /* offset[18] = insn[13]. */
8954 offset |= (insn & (1 << 13)) << 5;
8955 /* offset[19] = insn[11]. */
8956 offset |= (insn & (1 << 11)) << 8;
8957
8958 /* jump to the offset */
b0109805 8959 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8960 }
8961 } else {
8962 /* Data processing immediate. */
8963 if (insn & (1 << 25)) {
8964 if (insn & (1 << 24)) {
8965 if (insn & (1 << 20))
8966 goto illegal_op;
8967 /* Bitfield/Saturate. */
8968 op = (insn >> 21) & 7;
8969 imm = insn & 0x1f;
8970 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8971 if (rn == 15) {
7d1b0095 8972 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8973 tcg_gen_movi_i32(tmp, 0);
8974 } else {
8975 tmp = load_reg(s, rn);
8976 }
9ee6e8bb
PB
8977 switch (op) {
8978 case 2: /* Signed bitfield extract. */
8979 imm++;
8980 if (shift + imm > 32)
8981 goto illegal_op;
8982 if (imm < 32)
6ddbc6e4 8983 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8984 break;
8985 case 6: /* Unsigned bitfield extract. */
8986 imm++;
8987 if (shift + imm > 32)
8988 goto illegal_op;
8989 if (imm < 32)
6ddbc6e4 8990 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8991 break;
8992 case 3: /* Bitfield insert/clear. */
8993 if (imm < shift)
8994 goto illegal_op;
8995 imm = imm + 1 - shift;
8996 if (imm != 32) {
6ddbc6e4 8997 tmp2 = load_reg(s, rd);
d593c48e 8998 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8999 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9000 }
9001 break;
9002 case 7:
9003 goto illegal_op;
9004 default: /* Saturate. */
9ee6e8bb
PB
9005 if (shift) {
9006 if (op & 1)
6ddbc6e4 9007 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9008 else
6ddbc6e4 9009 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9010 }
6ddbc6e4 9011 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9012 if (op & 4) {
9013 /* Unsigned. */
9ee6e8bb 9014 if ((op & 1) && shift == 0)
9ef39277 9015 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9016 else
9ef39277 9017 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9018 } else {
9ee6e8bb 9019 /* Signed. */
9ee6e8bb 9020 if ((op & 1) && shift == 0)
9ef39277 9021 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9022 else
9ef39277 9023 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9024 }
b75263d6 9025 tcg_temp_free_i32(tmp2);
9ee6e8bb 9026 break;
2c0262af 9027 }
6ddbc6e4 9028 store_reg(s, rd, tmp);
9ee6e8bb
PB
9029 } else {
9030 imm = ((insn & 0x04000000) >> 15)
9031 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9032 if (insn & (1 << 22)) {
9033 /* 16-bit immediate. */
9034 imm |= (insn >> 4) & 0xf000;
9035 if (insn & (1 << 23)) {
9036 /* movt */
5e3f878a 9037 tmp = load_reg(s, rd);
86831435 9038 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9039 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9040 } else {
9ee6e8bb 9041 /* movw */
7d1b0095 9042 tmp = tcg_temp_new_i32();
5e3f878a 9043 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9044 }
9045 } else {
9ee6e8bb
PB
9046 /* Add/sub 12-bit immediate. */
9047 if (rn == 15) {
b0109805 9048 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9049 if (insn & (1 << 23))
b0109805 9050 offset -= imm;
9ee6e8bb 9051 else
b0109805 9052 offset += imm;
7d1b0095 9053 tmp = tcg_temp_new_i32();
5e3f878a 9054 tcg_gen_movi_i32(tmp, offset);
2c0262af 9055 } else {
5e3f878a 9056 tmp = load_reg(s, rn);
9ee6e8bb 9057 if (insn & (1 << 23))
5e3f878a 9058 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9059 else
5e3f878a 9060 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9061 }
9ee6e8bb 9062 }
5e3f878a 9063 store_reg(s, rd, tmp);
191abaa2 9064 }
9ee6e8bb
PB
9065 } else {
9066 int shifter_out = 0;
9067 /* modified 12-bit immediate. */
9068 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9069 imm = (insn & 0xff);
9070 switch (shift) {
9071 case 0: /* XY */
9072 /* Nothing to do. */
9073 break;
9074 case 1: /* 00XY00XY */
9075 imm |= imm << 16;
9076 break;
9077 case 2: /* XY00XY00 */
9078 imm |= imm << 16;
9079 imm <<= 8;
9080 break;
9081 case 3: /* XYXYXYXY */
9082 imm |= imm << 16;
9083 imm |= imm << 8;
9084 break;
9085 default: /* Rotated constant. */
9086 shift = (shift << 1) | (imm >> 7);
9087 imm |= 0x80;
9088 imm = imm << (32 - shift);
9089 shifter_out = 1;
9090 break;
b5ff1b31 9091 }
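/* Editor's note: an illustrative sketch, not part of translate.c, of the
 * full "modified immediate" expansion that the switch above implements,
 * taking the 12-bit i:imm3:a:bcdefgh field as one value.  The helper name
 * is made up; assumes <stdint.h>.
 */
static uint32_t t32_expand_imm_sketch(uint32_t imm12)
{
    uint32_t imm8 = imm12 & 0xff;

    switch ((imm12 >> 8) & 0xf) {
    case 0:
        return imm8;                                  /* 000000XY */
    case 1:
        return (imm8 << 16) | imm8;                   /* 00XY00XY */
    case 2:
        return (imm8 << 24) | (imm8 << 8);            /* XY00XY00 */
    case 3:
        return imm8 * 0x01010101u;                    /* XYXYXYXY */
    default:
        /* 1bcdefgh rotated right by imm12[11:7]; the rotation is always
         * at least 8 here, so a single left shift (as above) is enough,
         * e.g. imm12 = 0x4ab expands to 0x55800000. */
        return (0x80u | (imm12 & 0x7f)) << (32 - ((imm12 >> 7) & 0x1f));
    }
}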
7d1b0095 9092 tmp2 = tcg_temp_new_i32();
3174f8e9 9093 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9094 rn = (insn >> 16) & 0xf;
3174f8e9 9095 if (rn == 15) {
7d1b0095 9096 tmp = tcg_temp_new_i32();
3174f8e9
FN
9097 tcg_gen_movi_i32(tmp, 0);
9098 } else {
9099 tmp = load_reg(s, rn);
9100 }
9ee6e8bb
PB
9101 op = (insn >> 21) & 0xf;
9102 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9103 shifter_out, tmp, tmp2))
9ee6e8bb 9104 goto illegal_op;
7d1b0095 9105 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9106 rd = (insn >> 8) & 0xf;
9107 if (rd != 15) {
3174f8e9
FN
9108 store_reg(s, rd, tmp);
9109 } else {
7d1b0095 9110 tcg_temp_free_i32(tmp);
2c0262af 9111 }
2c0262af 9112 }
9ee6e8bb
PB
9113 }
9114 break;
9115 case 12: /* Load/store single data item. */
9116 {
9117 int postinc = 0;
9118 int writeback = 0;
b0109805 9119 int user;
9ee6e8bb
PB
9120 if ((insn & 0x01100000) == 0x01000000) {
9121 if (disas_neon_ls_insn(env, s, insn))
c1713132 9122 goto illegal_op;
9ee6e8bb
PB
9123 break;
9124 }
a2fdc890
PM
9125 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9126 if (rs == 15) {
9127 if (!(insn & (1 << 20))) {
9128 goto illegal_op;
9129 }
9130 if (op != 2) {
 9131                 /* Byte or halfword load space with dest == r15: memory hints.
9132 * Catch them early so we don't emit pointless addressing code.
9133 * This space is a mix of:
9134 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9135 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9136 * cores)
9137 * unallocated hints, which must be treated as NOPs
9138 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9139 * which is easiest for the decoding logic
9140 * Some space which must UNDEF
9141 */
9142 int op1 = (insn >> 23) & 3;
9143 int op2 = (insn >> 6) & 0x3f;
9144 if (op & 2) {
9145 goto illegal_op;
9146 }
9147 if (rn == 15) {
02afbf64
PM
9148 /* UNPREDICTABLE, unallocated hint or
9149 * PLD/PLDW/PLI (literal)
9150 */
a2fdc890
PM
9151 return 0;
9152 }
9153 if (op1 & 1) {
02afbf64 9154 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9155 }
9156 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9157 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9158 }
9159 /* UNDEF space, or an UNPREDICTABLE */
9160 return 1;
9161 }
9162 }
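            /* Illustrative sketch, not part of the original file: a compact
             * restatement of the hint-space decode above.  Given the op, op1,
             * op2 and rn fields already extracted, it returns 0 where the code
             * treats the encoding as a NOP and 1 where it UNDEFs.  The helper
             * name is hypothetical and the block is compiled out.
             */
#if 0
            static int t32_mem_hint_sketch(int op, int op1, int op2, int rn)
            {
                if (op & 2) {
                    return 1;                   /* must UNDEF */
                }
                if (rn == 15 || (op1 & 1)) {
                    return 0;                   /* PLD/PLDW/PLI or unallocated hint */
                }
                if (op2 == 0 || (op2 & 0x3c) == 0x30) {
                    return 0;                   /* PLD/PLDW/PLI or unallocated hint */
                }
                return 1;                       /* UNDEF space or UNPREDICTABLE */
            }
#endif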
b0109805 9163 user = IS_USER(s);
9ee6e8bb 9164 if (rn == 15) {
7d1b0095 9165 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9166 /* PC relative. */
9167 /* s->pc has already been incremented by 4. */
9168 imm = s->pc & 0xfffffffc;
9169 if (insn & (1 << 23))
9170 imm += insn & 0xfff;
9171 else
9172 imm -= insn & 0xfff;
b0109805 9173 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9174 } else {
b0109805 9175 addr = load_reg(s, rn);
9ee6e8bb
PB
9176 if (insn & (1 << 23)) {
9177 /* Positive offset. */
9178 imm = insn & 0xfff;
b0109805 9179 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9180 } else {
9ee6e8bb 9181 imm = insn & 0xff;
2a0308c5
PM
9182 switch ((insn >> 8) & 0xf) {
9183 case 0x0: /* Shifted Register. */
9ee6e8bb 9184 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9185 if (shift > 3) {
9186 tcg_temp_free_i32(addr);
18c9b560 9187 goto illegal_op;
2a0308c5 9188 }
b26eefb6 9189 tmp = load_reg(s, rm);
9ee6e8bb 9190 if (shift)
b26eefb6 9191 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9192 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9193 tcg_temp_free_i32(tmp);
9ee6e8bb 9194 break;
2a0308c5 9195 case 0xc: /* Negative offset. */
b0109805 9196 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9197 break;
2a0308c5 9198 case 0xe: /* User privilege. */
b0109805
PB
9199 tcg_gen_addi_i32(addr, addr, imm);
9200 user = 1;
9ee6e8bb 9201 break;
2a0308c5 9202 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9203 imm = -imm;
9204 /* Fall through. */
2a0308c5 9205 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9206 postinc = 1;
9207 writeback = 1;
9208 break;
2a0308c5 9209 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9210 imm = -imm;
9211 /* Fall through. */
2a0308c5 9212 case 0xf: /* Pre-increment. */
b0109805 9213 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9214 writeback = 1;
9215 break;
9216 default:
2a0308c5 9217 tcg_temp_free_i32(addr);
b7bcbe95 9218 goto illegal_op;
9ee6e8bb
PB
9219 }
9220 }
9221 }
9ee6e8bb
PB
9222 if (insn & (1 << 20)) {
9223 /* Load. */
5a839c0d 9224 tmp = tcg_temp_new_i32();
a2fdc890 9225 switch (op) {
5a839c0d 9226 case 0:
08307563 9227 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9228 break;
9229 case 4:
08307563 9230 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9231 break;
9232 case 1:
08307563 9233 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9234 break;
9235 case 5:
08307563 9236 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9237 break;
9238 case 2:
08307563 9239 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9240 break;
2a0308c5 9241 default:
5a839c0d 9242 tcg_temp_free_i32(tmp);
2a0308c5
PM
9243 tcg_temp_free_i32(addr);
9244 goto illegal_op;
a2fdc890
PM
9245 }
9246 if (rs == 15) {
9247 gen_bx(s, tmp);
9ee6e8bb 9248 } else {
a2fdc890 9249 store_reg(s, rs, tmp);
9ee6e8bb
PB
9250 }
9251 } else {
9252 /* Store. */
b0109805 9253 tmp = load_reg(s, rs);
9ee6e8bb 9254 switch (op) {
5a839c0d 9255 case 0:
08307563 9256 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9257 break;
9258 case 1:
08307563 9259 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9260 break;
9261 case 2:
08307563 9262 gen_aa32_st32(tmp, addr, user);
5a839c0d 9263 break;
2a0308c5 9264 default:
5a839c0d 9265 tcg_temp_free_i32(tmp);
2a0308c5
PM
9266 tcg_temp_free_i32(addr);
9267 goto illegal_op;
b7bcbe95 9268 }
5a839c0d 9269 tcg_temp_free_i32(tmp);
2c0262af 9270 }
9ee6e8bb 9271 if (postinc)
b0109805
PB
9272 tcg_gen_addi_i32(addr, addr, imm);
9273 if (writeback) {
9274 store_reg(s, rn, addr);
9275 } else {
7d1b0095 9276 tcg_temp_free_i32(addr);
b0109805 9277 }
9ee6e8bb
PB
9278 }
9279 break;
9280 default:
9281 goto illegal_op;
2c0262af 9282 }
9ee6e8bb
PB
9283 return 0;
9284illegal_op:
9285 return 1;
2c0262af
FB
9286}
9287
0ecb72a5 9288static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9289{
9290 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9291 int32_t offset;
9292 int i;
39d5492a
PM
9293 TCGv_i32 tmp;
9294 TCGv_i32 tmp2;
9295 TCGv_i32 addr;
99c475ab 9296
9ee6e8bb
PB
9297 if (s->condexec_mask) {
9298 cond = s->condexec_cond;
bedd2912
JB
9299 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9300 s->condlabel = gen_new_label();
9301 gen_test_cc(cond ^ 1, s->condlabel);
9302 s->condjmp = 1;
9303 }
9ee6e8bb
PB
9304 }
9305
d31dd73e 9306 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9307 s->pc += 2;
b5ff1b31 9308
99c475ab
FB
9309 switch (insn >> 12) {
9310 case 0: case 1:
396e467c 9311
99c475ab
FB
9312 rd = insn & 7;
9313 op = (insn >> 11) & 3;
9314 if (op == 3) {
9315 /* add/subtract */
9316 rn = (insn >> 3) & 7;
396e467c 9317 tmp = load_reg(s, rn);
99c475ab
FB
9318 if (insn & (1 << 10)) {
9319 /* immediate */
7d1b0095 9320 tmp2 = tcg_temp_new_i32();
396e467c 9321 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9322 } else {
9323 /* reg */
9324 rm = (insn >> 6) & 7;
396e467c 9325 tmp2 = load_reg(s, rm);
99c475ab 9326 }
9ee6e8bb
PB
9327 if (insn & (1 << 9)) {
9328 if (s->condexec_mask)
396e467c 9329 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9330 else
72485ec4 9331 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9332 } else {
9333 if (s->condexec_mask)
396e467c 9334 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9335 else
72485ec4 9336 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9337 }
7d1b0095 9338 tcg_temp_free_i32(tmp2);
396e467c 9339 store_reg(s, rd, tmp);
99c475ab
FB
9340 } else {
9341 /* shift immediate */
9342 rm = (insn >> 3) & 7;
9343 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9344 tmp = load_reg(s, rm);
9345 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9346 if (!s->condexec_mask)
9347 gen_logic_CC(tmp);
9348 store_reg(s, rd, tmp);
99c475ab
FB
9349 }
9350 break;
9351 case 2: case 3:
9352 /* arithmetic large immediate */
9353 op = (insn >> 11) & 3;
9354 rd = (insn >> 8) & 0x7;
396e467c 9355 if (op == 0) { /* mov */
7d1b0095 9356 tmp = tcg_temp_new_i32();
396e467c 9357 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9358 if (!s->condexec_mask)
396e467c
FN
9359 gen_logic_CC(tmp);
9360 store_reg(s, rd, tmp);
9361 } else {
9362 tmp = load_reg(s, rd);
7d1b0095 9363 tmp2 = tcg_temp_new_i32();
396e467c
FN
9364 tcg_gen_movi_i32(tmp2, insn & 0xff);
9365 switch (op) {
9366 case 1: /* cmp */
72485ec4 9367 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9368 tcg_temp_free_i32(tmp);
9369 tcg_temp_free_i32(tmp2);
396e467c
FN
9370 break;
9371 case 2: /* add */
9372 if (s->condexec_mask)
9373 tcg_gen_add_i32(tmp, tmp, tmp2);
9374 else
72485ec4 9375 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9376 tcg_temp_free_i32(tmp2);
396e467c
FN
9377 store_reg(s, rd, tmp);
9378 break;
9379 case 3: /* sub */
9380 if (s->condexec_mask)
9381 tcg_gen_sub_i32(tmp, tmp, tmp2);
9382 else
72485ec4 9383 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9384 tcg_temp_free_i32(tmp2);
396e467c
FN
9385 store_reg(s, rd, tmp);
9386 break;
9387 }
99c475ab 9388 }
99c475ab
FB
9389 break;
9390 case 4:
9391 if (insn & (1 << 11)) {
9392 rd = (insn >> 8) & 7;
5899f386
FB
9393 /* load pc-relative. Bit 1 of PC is ignored. */
9394 val = s->pc + 2 + ((insn & 0xff) * 4);
9395 val &= ~(uint32_t)2;
7d1b0095 9396 addr = tcg_temp_new_i32();
b0109805 9397 tcg_gen_movi_i32(addr, val);
c40c8556 9398 tmp = tcg_temp_new_i32();
08307563 9399 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9400 tcg_temp_free_i32(addr);
b0109805 9401 store_reg(s, rd, tmp);
99c475ab
FB
9402 break;
9403 }
9404 if (insn & (1 << 10)) {
9405 /* data processing extended or blx */
9406 rd = (insn & 7) | ((insn >> 4) & 8);
9407 rm = (insn >> 3) & 0xf;
9408 op = (insn >> 8) & 3;
9409 switch (op) {
9410 case 0: /* add */
396e467c
FN
9411 tmp = load_reg(s, rd);
9412 tmp2 = load_reg(s, rm);
9413 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9414 tcg_temp_free_i32(tmp2);
396e467c 9415 store_reg(s, rd, tmp);
99c475ab
FB
9416 break;
9417 case 1: /* cmp */
396e467c
FN
9418 tmp = load_reg(s, rd);
9419 tmp2 = load_reg(s, rm);
72485ec4 9420 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9421 tcg_temp_free_i32(tmp2);
9422 tcg_temp_free_i32(tmp);
99c475ab
FB
9423 break;
9424 case 2: /* mov/cpy */
396e467c
FN
9425 tmp = load_reg(s, rm);
9426 store_reg(s, rd, tmp);
99c475ab
FB
9427 break;
 9428             case 3: /* branch [and link] exchange thumb register */
b0109805 9429 tmp = load_reg(s, rm);
99c475ab 9430 if (insn & (1 << 7)) {
be5e7a76 9431 ARCH(5);
99c475ab 9432 val = (uint32_t)s->pc | 1;
7d1b0095 9433 tmp2 = tcg_temp_new_i32();
b0109805
PB
9434 tcg_gen_movi_i32(tmp2, val);
9435 store_reg(s, 14, tmp2);
99c475ab 9436 }
be5e7a76 9437 /* already thumb, no need to check */
d9ba4830 9438 gen_bx(s, tmp);
99c475ab
FB
9439 break;
9440 }
9441 break;
9442 }
9443
9444 /* data processing register */
9445 rd = insn & 7;
9446 rm = (insn >> 3) & 7;
9447 op = (insn >> 6) & 0xf;
9448 if (op == 2 || op == 3 || op == 4 || op == 7) {
9449 /* the shift/rotate ops want the operands backwards */
9450 val = rm;
9451 rm = rd;
9452 rd = val;
9453 val = 1;
9454 } else {
9455 val = 0;
9456 }
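        /* With rd and rm swapped, tmp (loaded from rd below) holds the shift
         * amount and tmp2 (loaded from rm) holds the value being shifted;
         * val == 1 makes the result in tmp2 get written back to the
         * architectural Rd at the end of this case. */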
9457
396e467c 9458 if (op == 9) { /* neg */
7d1b0095 9459 tmp = tcg_temp_new_i32();
396e467c
FN
9460 tcg_gen_movi_i32(tmp, 0);
9461 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9462 tmp = load_reg(s, rd);
9463 } else {
39d5492a 9464 TCGV_UNUSED_I32(tmp);
396e467c 9465 }
99c475ab 9466
396e467c 9467 tmp2 = load_reg(s, rm);
5899f386 9468 switch (op) {
99c475ab 9469 case 0x0: /* and */
396e467c 9470 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9471 if (!s->condexec_mask)
396e467c 9472 gen_logic_CC(tmp);
99c475ab
FB
9473 break;
9474 case 0x1: /* eor */
396e467c 9475 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9476 if (!s->condexec_mask)
396e467c 9477 gen_logic_CC(tmp);
99c475ab
FB
9478 break;
9479 case 0x2: /* lsl */
9ee6e8bb 9480 if (s->condexec_mask) {
365af80e 9481 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9482 } else {
9ef39277 9483 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9484 gen_logic_CC(tmp2);
9ee6e8bb 9485 }
99c475ab
FB
9486 break;
9487 case 0x3: /* lsr */
9ee6e8bb 9488 if (s->condexec_mask) {
365af80e 9489 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9490 } else {
9ef39277 9491 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9492 gen_logic_CC(tmp2);
9ee6e8bb 9493 }
99c475ab
FB
9494 break;
9495 case 0x4: /* asr */
9ee6e8bb 9496 if (s->condexec_mask) {
365af80e 9497 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9498 } else {
9ef39277 9499 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9500 gen_logic_CC(tmp2);
9ee6e8bb 9501 }
99c475ab
FB
9502 break;
9503 case 0x5: /* adc */
49b4c31e 9504 if (s->condexec_mask) {
396e467c 9505 gen_adc(tmp, tmp2);
49b4c31e
RH
9506 } else {
9507 gen_adc_CC(tmp, tmp, tmp2);
9508 }
99c475ab
FB
9509 break;
9510 case 0x6: /* sbc */
2de68a49 9511 if (s->condexec_mask) {
396e467c 9512 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9513 } else {
9514 gen_sbc_CC(tmp, tmp, tmp2);
9515 }
99c475ab
FB
9516 break;
9517 case 0x7: /* ror */
9ee6e8bb 9518 if (s->condexec_mask) {
f669df27
AJ
9519 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9520 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9521 } else {
9ef39277 9522 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9523 gen_logic_CC(tmp2);
9ee6e8bb 9524 }
99c475ab
FB
9525 break;
9526 case 0x8: /* tst */
396e467c
FN
9527 tcg_gen_and_i32(tmp, tmp, tmp2);
9528 gen_logic_CC(tmp);
99c475ab 9529 rd = 16;
5899f386 9530 break;
99c475ab 9531 case 0x9: /* neg */
9ee6e8bb 9532 if (s->condexec_mask)
396e467c 9533 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9534 else
72485ec4 9535 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9536 break;
9537 case 0xa: /* cmp */
72485ec4 9538 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9539 rd = 16;
9540 break;
9541 case 0xb: /* cmn */
72485ec4 9542 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9543 rd = 16;
9544 break;
9545 case 0xc: /* orr */
396e467c 9546 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9547 if (!s->condexec_mask)
396e467c 9548 gen_logic_CC(tmp);
99c475ab
FB
9549 break;
9550 case 0xd: /* mul */
7b2919a0 9551 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9552 if (!s->condexec_mask)
396e467c 9553 gen_logic_CC(tmp);
99c475ab
FB
9554 break;
9555 case 0xe: /* bic */
f669df27 9556 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9557 if (!s->condexec_mask)
396e467c 9558 gen_logic_CC(tmp);
99c475ab
FB
9559 break;
9560 case 0xf: /* mvn */
396e467c 9561 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9562 if (!s->condexec_mask)
396e467c 9563 gen_logic_CC(tmp2);
99c475ab 9564 val = 1;
5899f386 9565 rm = rd;
99c475ab
FB
9566 break;
9567 }
9568 if (rd != 16) {
396e467c
FN
9569 if (val) {
9570 store_reg(s, rm, tmp2);
9571 if (op != 0xf)
7d1b0095 9572 tcg_temp_free_i32(tmp);
396e467c
FN
9573 } else {
9574 store_reg(s, rd, tmp);
7d1b0095 9575 tcg_temp_free_i32(tmp2);
396e467c
FN
9576 }
9577 } else {
7d1b0095
PM
9578 tcg_temp_free_i32(tmp);
9579 tcg_temp_free_i32(tmp2);
99c475ab
FB
9580 }
9581 break;
9582
9583 case 5:
9584 /* load/store register offset. */
9585 rd = insn & 7;
9586 rn = (insn >> 3) & 7;
9587 rm = (insn >> 6) & 7;
9588 op = (insn >> 9) & 7;
b0109805 9589 addr = load_reg(s, rn);
b26eefb6 9590 tmp = load_reg(s, rm);
b0109805 9591 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9592 tcg_temp_free_i32(tmp);
99c475ab 9593
c40c8556 9594 if (op < 3) { /* store */
b0109805 9595 tmp = load_reg(s, rd);
c40c8556
PM
9596 } else {
9597 tmp = tcg_temp_new_i32();
9598 }
99c475ab
FB
9599
9600 switch (op) {
9601 case 0: /* str */
08307563 9602 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9603 break;
9604 case 1: /* strh */
08307563 9605 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9606 break;
9607 case 2: /* strb */
08307563 9608 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9609 break;
9610 case 3: /* ldrsb */
08307563 9611 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9612 break;
9613 case 4: /* ldr */
08307563 9614 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9615 break;
9616 case 5: /* ldrh */
08307563 9617 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9618 break;
9619 case 6: /* ldrb */
08307563 9620 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9621 break;
9622 case 7: /* ldrsh */
08307563 9623 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9624 break;
9625 }
c40c8556 9626 if (op >= 3) { /* load */
b0109805 9627 store_reg(s, rd, tmp);
c40c8556
PM
9628 } else {
9629 tcg_temp_free_i32(tmp);
9630 }
7d1b0095 9631 tcg_temp_free_i32(addr);
99c475ab
FB
9632 break;
9633
9634 case 6:
9635 /* load/store word immediate offset */
9636 rd = insn & 7;
9637 rn = (insn >> 3) & 7;
b0109805 9638 addr = load_reg(s, rn);
99c475ab 9639 val = (insn >> 4) & 0x7c;
b0109805 9640 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9641
9642 if (insn & (1 << 11)) {
9643 /* load */
c40c8556 9644 tmp = tcg_temp_new_i32();
08307563 9645 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9646 store_reg(s, rd, tmp);
99c475ab
FB
9647 } else {
9648 /* store */
b0109805 9649 tmp = load_reg(s, rd);
08307563 9650 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9651 tcg_temp_free_i32(tmp);
99c475ab 9652 }
7d1b0095 9653 tcg_temp_free_i32(addr);
99c475ab
FB
9654 break;
9655
9656 case 7:
9657 /* load/store byte immediate offset */
9658 rd = insn & 7;
9659 rn = (insn >> 3) & 7;
b0109805 9660 addr = load_reg(s, rn);
99c475ab 9661 val = (insn >> 6) & 0x1f;
b0109805 9662 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9663
9664 if (insn & (1 << 11)) {
9665 /* load */
c40c8556 9666 tmp = tcg_temp_new_i32();
08307563 9667 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 9668 store_reg(s, rd, tmp);
99c475ab
FB
9669 } else {
9670 /* store */
b0109805 9671 tmp = load_reg(s, rd);
08307563 9672 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 9673 tcg_temp_free_i32(tmp);
99c475ab 9674 }
7d1b0095 9675 tcg_temp_free_i32(addr);
99c475ab
FB
9676 break;
9677
9678 case 8:
9679 /* load/store halfword immediate offset */
9680 rd = insn & 7;
9681 rn = (insn >> 3) & 7;
b0109805 9682 addr = load_reg(s, rn);
99c475ab 9683 val = (insn >> 5) & 0x3e;
b0109805 9684 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9685
9686 if (insn & (1 << 11)) {
9687 /* load */
c40c8556 9688 tmp = tcg_temp_new_i32();
08307563 9689 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 9690 store_reg(s, rd, tmp);
99c475ab
FB
9691 } else {
9692 /* store */
b0109805 9693 tmp = load_reg(s, rd);
08307563 9694 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 9695 tcg_temp_free_i32(tmp);
99c475ab 9696 }
7d1b0095 9697 tcg_temp_free_i32(addr);
99c475ab
FB
9698 break;
9699
9700 case 9:
9701 /* load/store from stack */
9702 rd = (insn >> 8) & 7;
b0109805 9703 addr = load_reg(s, 13);
99c475ab 9704 val = (insn & 0xff) * 4;
b0109805 9705 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9706
9707 if (insn & (1 << 11)) {
9708 /* load */
c40c8556 9709 tmp = tcg_temp_new_i32();
08307563 9710 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9711 store_reg(s, rd, tmp);
99c475ab
FB
9712 } else {
9713 /* store */
b0109805 9714 tmp = load_reg(s, rd);
08307563 9715 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9716 tcg_temp_free_i32(tmp);
99c475ab 9717 }
7d1b0095 9718 tcg_temp_free_i32(addr);
99c475ab
FB
9719 break;
9720
9721 case 10:
9722 /* add to high reg */
9723 rd = (insn >> 8) & 7;
5899f386
FB
9724 if (insn & (1 << 11)) {
9725 /* SP */
5e3f878a 9726 tmp = load_reg(s, 13);
5899f386
FB
9727 } else {
 9728             /* PC. Bit 1 is ignored. */
7d1b0095 9729 tmp = tcg_temp_new_i32();
5e3f878a 9730 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9731 }
99c475ab 9732 val = (insn & 0xff) * 4;
5e3f878a
PB
9733 tcg_gen_addi_i32(tmp, tmp, val);
9734 store_reg(s, rd, tmp);
99c475ab
FB
9735 break;
9736
9737 case 11:
9738 /* misc */
9739 op = (insn >> 8) & 0xf;
9740 switch (op) {
9741 case 0:
9742 /* adjust stack pointer */
b26eefb6 9743 tmp = load_reg(s, 13);
99c475ab
FB
9744 val = (insn & 0x7f) * 4;
9745 if (insn & (1 << 7))
6a0d8a1d 9746 val = -(int32_t)val;
b26eefb6
PB
9747 tcg_gen_addi_i32(tmp, tmp, val);
9748 store_reg(s, 13, tmp);
99c475ab
FB
9749 break;
9750
9ee6e8bb
PB
9751 case 2: /* sign/zero extend. */
9752 ARCH(6);
9753 rd = insn & 7;
9754 rm = (insn >> 3) & 7;
b0109805 9755 tmp = load_reg(s, rm);
9ee6e8bb 9756 switch ((insn >> 6) & 3) {
b0109805
PB
9757 case 0: gen_sxth(tmp); break;
9758 case 1: gen_sxtb(tmp); break;
9759 case 2: gen_uxth(tmp); break;
9760 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9761 }
b0109805 9762 store_reg(s, rd, tmp);
9ee6e8bb 9763 break;
99c475ab
FB
9764 case 4: case 5: case 0xc: case 0xd:
9765 /* push/pop */
b0109805 9766 addr = load_reg(s, 13);
5899f386
FB
9767 if (insn & (1 << 8))
9768 offset = 4;
99c475ab 9769 else
5899f386
FB
9770 offset = 0;
9771 for (i = 0; i < 8; i++) {
9772 if (insn & (1 << i))
9773 offset += 4;
9774 }
9775 if ((insn & (1 << 11)) == 0) {
b0109805 9776 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9777 }
99c475ab
FB
9778 for (i = 0; i < 8; i++) {
9779 if (insn & (1 << i)) {
9780 if (insn & (1 << 11)) {
9781 /* pop */
c40c8556 9782 tmp = tcg_temp_new_i32();
08307563 9783 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9784 store_reg(s, i, tmp);
99c475ab
FB
9785 } else {
9786 /* push */
b0109805 9787 tmp = load_reg(s, i);
08307563 9788 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9789 tcg_temp_free_i32(tmp);
99c475ab 9790 }
5899f386 9791 /* advance to the next address. */
b0109805 9792 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9793 }
9794 }
39d5492a 9795 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9796 if (insn & (1 << 8)) {
9797 if (insn & (1 << 11)) {
9798 /* pop pc */
c40c8556 9799 tmp = tcg_temp_new_i32();
08307563 9800 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9801 /* don't set the pc until the rest of the instruction
9802 has completed */
9803 } else {
9804 /* push lr */
b0109805 9805 tmp = load_reg(s, 14);
08307563 9806 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9807 tcg_temp_free_i32(tmp);
99c475ab 9808 }
b0109805 9809 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9810 }
5899f386 9811 if ((insn & (1 << 11)) == 0) {
b0109805 9812 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9813 }
99c475ab 9814 /* write back the new stack pointer */
b0109805 9815 store_reg(s, 13, addr);
99c475ab 9816 /* set the new PC value */
be5e7a76
DES
9817 if ((insn & 0x0900) == 0x0900) {
9818 store_reg_from_load(env, s, 15, tmp);
9819 }
99c475ab
FB
9820 break;
9821
9ee6e8bb
PB
 9822         case 1: case 3: case 9: case 11: /* cbz/cbnz */
9823 rm = insn & 7;
d9ba4830 9824 tmp = load_reg(s, rm);
9ee6e8bb
PB
9825 s->condlabel = gen_new_label();
9826 s->condjmp = 1;
9827 if (insn & (1 << 11))
cb63669a 9828 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9829 else
cb63669a 9830 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9831 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9832 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
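            /* The offset computed above is ZeroExtend(i:imm5:'0'): insn[9]
             * supplies bit 6 and insn[7:3] supply bits 5:1, so CBZ/CBNZ can
             * only branch forward. */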
9833 val = (uint32_t)s->pc + 2;
9834 val += offset;
9835 gen_jmp(s, val);
9836 break;
9837
9838 case 15: /* IT, nop-hint. */
9839 if ((insn & 0xf) == 0) {
9840 gen_nop_hint(s, (insn >> 4) & 0xf);
9841 break;
9842 }
9843 /* If Then. */
9844 s->condexec_cond = (insn >> 4) & 0xe;
9845 s->condexec_mask = insn & 0x1f;
9846 /* No actual code generated for this insn, just setup state. */
9847 break;
9848
06c949e6 9849 case 0xe: /* bkpt */
be5e7a76 9850 ARCH(5);
bc4a0de0 9851 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9852 break;
9853
9ee6e8bb
PB
9854 case 0xa: /* rev */
9855 ARCH(6);
9856 rn = (insn >> 3) & 0x7;
9857 rd = insn & 0x7;
b0109805 9858 tmp = load_reg(s, rn);
9ee6e8bb 9859 switch ((insn >> 6) & 3) {
66896cb8 9860 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9861 case 1: gen_rev16(tmp); break;
9862 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9863 default: goto illegal_op;
9864 }
b0109805 9865 store_reg(s, rd, tmp);
9ee6e8bb
PB
9866 break;
9867
d9e028c1
PM
9868 case 6:
9869 switch ((insn >> 5) & 7) {
9870 case 2:
9871 /* setend */
9872 ARCH(6);
10962fd5
PM
9873 if (((insn >> 3) & 1) != s->bswap_code) {
9874 /* Dynamic endianness switching not implemented. */
e0c270d9 9875 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
9876 goto illegal_op;
9877 }
9ee6e8bb 9878 break;
d9e028c1
PM
9879 case 3:
9880 /* cps */
9881 ARCH(6);
9882 if (IS_USER(s)) {
9883 break;
8984bd2e 9884 }
d9e028c1
PM
9885 if (IS_M(env)) {
9886 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9887 /* FAULTMASK */
9888 if (insn & 1) {
9889 addr = tcg_const_i32(19);
9890 gen_helper_v7m_msr(cpu_env, addr, tmp);
9891 tcg_temp_free_i32(addr);
9892 }
9893 /* PRIMASK */
9894 if (insn & 2) {
9895 addr = tcg_const_i32(16);
9896 gen_helper_v7m_msr(cpu_env, addr, tmp);
9897 tcg_temp_free_i32(addr);
9898 }
9899 tcg_temp_free_i32(tmp);
9900 gen_lookup_tb(s);
9901 } else {
9902 if (insn & (1 << 4)) {
9903 shift = CPSR_A | CPSR_I | CPSR_F;
9904 } else {
9905 shift = 0;
9906 }
9907 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9908 }
d9e028c1
PM
9909 break;
9910 default:
9911 goto undef;
9ee6e8bb
PB
9912 }
9913 break;
9914
99c475ab
FB
9915 default:
9916 goto undef;
9917 }
9918 break;
9919
9920 case 12:
a7d3970d 9921 {
99c475ab 9922 /* load/store multiple */
39d5492a
PM
9923 TCGv_i32 loaded_var;
9924 TCGV_UNUSED_I32(loaded_var);
99c475ab 9925 rn = (insn >> 8) & 0x7;
b0109805 9926 addr = load_reg(s, rn);
99c475ab
FB
9927 for (i = 0; i < 8; i++) {
9928 if (insn & (1 << i)) {
99c475ab
FB
9929 if (insn & (1 << 11)) {
9930 /* load */
c40c8556 9931 tmp = tcg_temp_new_i32();
08307563 9932 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9933 if (i == rn) {
9934 loaded_var = tmp;
9935 } else {
9936 store_reg(s, i, tmp);
9937 }
99c475ab
FB
9938 } else {
9939 /* store */
b0109805 9940 tmp = load_reg(s, i);
08307563 9941 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9942 tcg_temp_free_i32(tmp);
99c475ab 9943 }
5899f386 9944 /* advance to the next address */
b0109805 9945 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9946 }
9947 }
b0109805 9948 if ((insn & (1 << rn)) == 0) {
a7d3970d 9949 /* base reg not in list: base register writeback */
b0109805
PB
9950 store_reg(s, rn, addr);
9951 } else {
a7d3970d
PM
9952 /* base reg in list: if load, complete it now */
9953 if (insn & (1 << 11)) {
9954 store_reg(s, rn, loaded_var);
9955 }
7d1b0095 9956 tcg_temp_free_i32(addr);
b0109805 9957 }
99c475ab 9958 break;
a7d3970d 9959 }
99c475ab
FB
9960 case 13:
9961 /* conditional branch or swi */
9962 cond = (insn >> 8) & 0xf;
9963 if (cond == 0xe)
9964 goto undef;
9965
9966 if (cond == 0xf) {
9967 /* swi */
eaed129d 9968 gen_set_pc_im(s, s->pc);
9ee6e8bb 9969 s->is_jmp = DISAS_SWI;
99c475ab
FB
9970 break;
9971 }
9972 /* generate a conditional jump to next instruction */
e50e6a20 9973 s->condlabel = gen_new_label();
d9ba4830 9974 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9975 s->condjmp = 1;
99c475ab
FB
9976
9977 /* jump to the offset */
5899f386 9978 val = (uint32_t)s->pc + 2;
99c475ab 9979 offset = ((int32_t)insn << 24) >> 24;
5899f386 9980 val += offset << 1;
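            /* The shifts above sign-extend the 8-bit branch immediate; the
             * target works out to the address of this insn + 4 + (offset << 1). */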
8aaca4c0 9981 gen_jmp(s, val);
99c475ab
FB
9982 break;
9983
9984 case 14:
358bf29e 9985 if (insn & (1 << 11)) {
9ee6e8bb
PB
9986 if (disas_thumb2_insn(env, s, insn))
9987 goto undef32;
358bf29e
PB
9988 break;
9989 }
9ee6e8bb 9990 /* unconditional branch */
99c475ab
FB
9991 val = (uint32_t)s->pc;
9992 offset = ((int32_t)insn << 21) >> 21;
9993 val += (offset << 1) + 2;
8aaca4c0 9994 gen_jmp(s, val);
99c475ab
FB
9995 break;
9996
9997 case 15:
9ee6e8bb 9998 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9999 goto undef32;
9ee6e8bb 10000 break;
99c475ab
FB
10001 }
10002 return;
9ee6e8bb 10003undef32:
bc4a0de0 10004 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
10005 return;
10006illegal_op:
99c475ab 10007undef:
bc4a0de0 10008 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
10009}
10010
2c0262af
FB
10011/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10012 basic block 'tb'. If search_pc is TRUE, also generate PC
10013 information for each intermediate instruction. */
5639c3f2 10014static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10015 TranslationBlock *tb,
5639c3f2 10016 bool search_pc)
2c0262af 10017{
ed2803da 10018 CPUState *cs = CPU(cpu);
5639c3f2 10019 CPUARMState *env = &cpu->env;
2c0262af 10020 DisasContext dc1, *dc = &dc1;
a1d1bb31 10021 CPUBreakpoint *bp;
2c0262af
FB
10022 uint16_t *gen_opc_end;
10023 int j, lj;
0fa85d43 10024 target_ulong pc_start;
0a2461fa 10025 target_ulong next_page_start;
2e70f6ef
PB
10026 int num_insns;
10027 int max_insns;
3b46e624 10028
2c0262af 10029 /* generate intermediate code */
0fa85d43 10030 pc_start = tb->pc;
3b46e624 10031
2c0262af
FB
10032 dc->tb = tb;
10033
92414b31 10034 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10035
10036 dc->is_jmp = DISAS_NEXT;
10037 dc->pc = pc_start;
ed2803da 10038 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10039 dc->condjmp = 0;
3926cc84
AG
10040
10041 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10042 dc->aarch64 = 1;
10043 dc->thumb = 0;
10044 dc->bswap_code = 0;
10045 dc->condexec_mask = 0;
10046 dc->condexec_cond = 0;
10047#if !defined(CONFIG_USER_ONLY)
10048 dc->user = 0;
10049#endif
10050 dc->vfp_enabled = 0;
10051 dc->vec_len = 0;
10052 dc->vec_stride = 0;
10053 } else {
10054 dc->aarch64 = 0;
10055 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10056 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10057 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10058 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 10059#if !defined(CONFIG_USER_ONLY)
3926cc84 10060 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 10061#endif
3926cc84
AG
10062 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10063 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10064 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
10065 }
a7812ae4
PB
10066 cpu_F0s = tcg_temp_new_i32();
10067 cpu_F1s = tcg_temp_new_i32();
10068 cpu_F0d = tcg_temp_new_i64();
10069 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10070 cpu_V0 = cpu_F0d;
10071 cpu_V1 = cpu_F1d;
e677137d 10072 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10073 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10074 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10075 lj = -1;
2e70f6ef
PB
10076 num_insns = 0;
10077 max_insns = tb->cflags & CF_COUNT_MASK;
10078 if (max_insns == 0)
10079 max_insns = CF_COUNT_MASK;
10080
806f352d 10081 gen_tb_start();
e12ce78d 10082
3849902c
PM
10083 tcg_clear_temp_count();
10084
e12ce78d
PM
10085 /* A note on handling of the condexec (IT) bits:
10086 *
10087 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10088 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10089 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10090 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10091 * to do it at the end of the block. (For example if we don't do this
10092 * it's hard to identify whether we can safely skip writing condexec
10093 * at the end of the TB, which we definitely want to do for the case
10094 * where a TB doesn't do anything with the IT state at all.)
10095 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10096 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10097 * This is done both for leaving the TB at the end, and for leaving
10098 * it because of an exception we know will happen, which is done in
10099 * gen_exception_insn(). The latter is necessary because we need to
10100 * leave the TB with the PC/IT state just prior to execution of the
10101 * instruction which caused the exception.
10102 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10103 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10104 * This is handled in the same way as restoration of the
10105 * PC in these situations: we will be called again with search_pc=1
10106 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10107 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10108 * this to restore the condexec bits.
e12ce78d
PM
10109 *
10110 * Note that there are no instructions which can read the condexec
10111 * bits, and none which can write non-static values to them, so
0ecb72a5 10112 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10113 * middle of a TB.
10114 */
10115
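    /* Illustrative sketch, not part of the original file: how the IT block
     * state carried in condexec_cond/condexec_mask is advanced after each
     * Thumb instruction.  It only restates the update done later in this
     * function, after disas_thumb_insn(); the helper name is hypothetical
     * and the block is compiled out.
     */
#if 0
    static void advance_it_state_sketch(DisasContext *dc)
    {
        if (dc->condexec_mask) {
            /* Shift the mask left one bit, pulling its top bit into the
             * low bit of the condition, and clear the condition once the
             * mask runs out. */
            dc->condexec_cond = (dc->condexec_cond & 0xe)
                                | ((dc->condexec_mask >> 4) & 1);
            dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
            if (dc->condexec_mask == 0) {
                dc->condexec_cond = 0;
            }
        }
    }
#endif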
9ee6e8bb
PB
10116 /* Reset the conditional execution bits immediately. This avoids
10117 complications trying to do it at the end of the block. */
98eac7ca 10118 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10119 {
39d5492a 10120 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10121 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10122 store_cpu_field(tmp, condexec_bits);
8f01245e 10123 }
2c0262af 10124 do {
fbb4a2e3
PB
10125#ifdef CONFIG_USER_ONLY
10126 /* Intercept jump to the magic kernel page. */
14ade10f 10127 if (!dc->aarch64 && dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10128 /* We always get here via a jump, so know we are not in a
10129 conditional execution block. */
10130 gen_exception(EXCP_KERNEL_TRAP);
10131 dc->is_jmp = DISAS_UPDATE;
10132 break;
10133 }
10134#else
9ee6e8bb
PB
10135 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10136 /* We always get here via a jump, so know we are not in a
10137 conditional execution block. */
d9ba4830 10138 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10139 dc->is_jmp = DISAS_UPDATE;
10140 break;
9ee6e8bb
PB
10141 }
10142#endif
10143
72cf2d4f
BS
10144 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10145 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10146 if (bp->pc == dc->pc) {
bc4a0de0 10147 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10148 /* Advance PC so that clearing the breakpoint will
10149 invalidate this TB. */
10150 dc->pc += 2;
10151 goto done_generating;
1fddef4b
FB
10152 }
10153 }
10154 }
2c0262af 10155 if (search_pc) {
92414b31 10156 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10157 if (lj < j) {
10158 lj++;
10159 while (lj < j)
ab1103de 10160 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10161 }
25983cad 10162 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10163 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
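            /* The line above repacks condexec_cond/condexec_mask into the same
             * layout as the ARM_TBFLAG_CONDEXEC field decoded earlier, so that
             * restore_state_to_opc() can copy it straight back into
             * env->condexec_bits. */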
ab1103de 10164 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10165 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10166 }
e50e6a20 10167
2e70f6ef
PB
10168 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10169 gen_io_start();
10170
fdefe51c 10171 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10172 tcg_gen_debug_insn_start(dc->pc);
10173 }
10174
14ade10f
AG
10175 if (dc->aarch64) {
10176 disas_a64_insn(env, dc);
10177 } else if (dc->thumb) {
9ee6e8bb
PB
10178 disas_thumb_insn(env, dc);
10179 if (dc->condexec_mask) {
10180 dc->condexec_cond = (dc->condexec_cond & 0xe)
10181 | ((dc->condexec_mask >> 4) & 1);
10182 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10183 if (dc->condexec_mask == 0) {
10184 dc->condexec_cond = 0;
10185 }
10186 }
10187 } else {
10188 disas_arm_insn(env, dc);
10189 }
e50e6a20
FB
10190
10191 if (dc->condjmp && !dc->is_jmp) {
10192 gen_set_label(dc->condlabel);
10193 dc->condjmp = 0;
10194 }
3849902c
PM
10195
10196 if (tcg_check_temp_count()) {
0a2461fa
AG
10197 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10198 dc->pc);
3849902c
PM
10199 }
10200
aaf2d97d 10201 /* Translation stops when a conditional branch is encountered.
e50e6a20 10202 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10203 * Also stop translation when a page boundary is reached. This
bf20dc07 10204 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10205 num_insns ++;
efd7f486 10206 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10207 !cs->singlestep_enabled &&
1b530a6d 10208 !singlestep &&
2e70f6ef
PB
10209 dc->pc < next_page_start &&
10210 num_insns < max_insns);
10211
10212 if (tb->cflags & CF_LAST_IO) {
10213 if (dc->condjmp) {
10214 /* FIXME: This can theoretically happen with self-modifying
10215 code. */
10216 cpu_abort(env, "IO on conditional branch instruction");
10217 }
10218 gen_io_end();
10219 }
9ee6e8bb 10220
b5ff1b31 10221 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10222 instruction was a conditional branch or trap, and the PC has
10223 already been written. */
ed2803da 10224 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10225 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10226 if (dc->condjmp) {
9ee6e8bb
PB
10227 gen_set_condexec(dc);
10228 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10229 gen_exception(EXCP_SWI);
9ee6e8bb 10230 } else {
d9ba4830 10231 gen_exception(EXCP_DEBUG);
9ee6e8bb 10232 }
e50e6a20
FB
10233 gen_set_label(dc->condlabel);
10234 }
10235 if (dc->condjmp || !dc->is_jmp) {
eaed129d 10236 gen_set_pc_im(dc, dc->pc);
e50e6a20 10237 dc->condjmp = 0;
8aaca4c0 10238 }
9ee6e8bb
PB
10239 gen_set_condexec(dc);
10240 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10241 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10242 } else {
10243 /* FIXME: Single stepping a WFI insn will not halt
10244 the CPU. */
d9ba4830 10245 gen_exception(EXCP_DEBUG);
9ee6e8bb 10246 }
8aaca4c0 10247 } else {
9ee6e8bb
PB
10248 /* While branches must always occur at the end of an IT block,
10249 there are a few other things that can cause us to terminate
65626741 10250 the TB in the middle of an IT block:
9ee6e8bb
PB
10251 - Exception generating instructions (bkpt, swi, undefined).
10252 - Page boundaries.
10253 - Hardware watchpoints.
10254 Hardware breakpoints have already been handled and skip this code.
10255 */
10256 gen_set_condexec(dc);
8aaca4c0 10257 switch(dc->is_jmp) {
8aaca4c0 10258 case DISAS_NEXT:
6e256c93 10259 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10260 break;
10261 default:
10262 case DISAS_JUMP:
10263 case DISAS_UPDATE:
10264 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10265 tcg_gen_exit_tb(0);
8aaca4c0
FB
10266 break;
10267 case DISAS_TB_JUMP:
10268 /* nothing more to generate */
10269 break;
9ee6e8bb 10270 case DISAS_WFI:
1ce94f81 10271 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10272 break;
10273 case DISAS_SWI:
d9ba4830 10274 gen_exception(EXCP_SWI);
9ee6e8bb 10275 break;
8aaca4c0 10276 }
e50e6a20
FB
10277 if (dc->condjmp) {
10278 gen_set_label(dc->condlabel);
9ee6e8bb 10279 gen_set_condexec(dc);
6e256c93 10280 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10281 dc->condjmp = 0;
10282 }
2c0262af 10283 }
2e70f6ef 10284
9ee6e8bb 10285done_generating:
806f352d 10286 gen_tb_end(tb, num_insns);
efd7f486 10287 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10288
10289#ifdef DEBUG_DISAS
8fec2b8c 10290 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10291 qemu_log("----------------\n");
10292 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10293 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10294 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10295 qemu_log("\n");
2c0262af
FB
10296 }
10297#endif
b5ff1b31 10298 if (search_pc) {
92414b31 10299 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10300 lj++;
10301 while (lj <= j)
ab1103de 10302 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10303 } else {
2c0262af 10304 tb->size = dc->pc - pc_start;
2e70f6ef 10305 tb->icount = num_insns;
b5ff1b31 10306 }
2c0262af
FB
10307}
10308
0ecb72a5 10309void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10310{
5639c3f2 10311 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10312}
10313
0ecb72a5 10314void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10315{
5639c3f2 10316 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10317}
10318
b5ff1b31
FB
10319static const char *cpu_mode_names[16] = {
10320 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10321 "???", "???", "???", "und", "???", "???", "???", "sys"
10322};
9ee6e8bb 10323
878096ee
AF
10324void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10325 int flags)
2c0262af 10326{
878096ee
AF
10327 ARMCPU *cpu = ARM_CPU(cs);
10328 CPUARMState *env = &cpu->env;
2c0262af 10329 int i;
b5ff1b31 10330 uint32_t psr;
2c0262af
FB
10331
10332 for(i=0;i<16;i++) {
7fe48483 10333 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10334 if ((i % 4) == 3)
7fe48483 10335 cpu_fprintf(f, "\n");
2c0262af 10336 else
7fe48483 10337 cpu_fprintf(f, " ");
2c0262af 10338 }
b5ff1b31 10339 psr = cpsr_read(env);
687fa640
TS
10340 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10341 psr,
b5ff1b31
FB
10342 psr & (1 << 31) ? 'N' : '-',
10343 psr & (1 << 30) ? 'Z' : '-',
10344 psr & (1 << 29) ? 'C' : '-',
10345 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10346 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10347 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10348
f2617cfc
PM
10349 if (flags & CPU_DUMP_FPU) {
10350 int numvfpregs = 0;
10351 if (arm_feature(env, ARM_FEATURE_VFP)) {
10352 numvfpregs += 16;
10353 }
10354 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10355 numvfpregs += 16;
10356 }
10357 for (i = 0; i < numvfpregs; i++) {
10358 uint64_t v = float64_val(env->vfp.regs[i]);
10359 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10360 i * 2, (uint32_t)v,
10361 i * 2 + 1, (uint32_t)(v >> 32),
10362 i, v);
10363 }
10364 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10365 }
2c0262af 10366}
a6b025d3 10367
0ecb72a5 10368void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10369{
3926cc84
AG
10370 if (is_a64(env)) {
10371 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
10372 } else {
10373 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
10374 }
e12ce78d 10375 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10376}