]> Git Repo - qemu.git/blame - target-arm/translate.c
target-arm: Split out private-to-target functions into internals.h
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1497c961 33
7b59220e 34#include "helper.h"
1497c961 35#define GEN_HELPER 1
7b59220e 36#include "helper.h"
2c0262af 37
be5e7a76
DES
/* Feature-test convenience macros; each expands to a check on the
 * implicitly-in-scope 'env' of the translating CPU. */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
/* Jazelle (5J) is never implemented here. */
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)

/* Bail out to the decoder's illegal_op label if the required
 * architecture level is not present on this core. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d
PM
/* Per-generated-op record of the Thumb conditional-execution (IT) bits;
 * indexed in parallel with the other gen_opc_* arrays (size OPC_BUF_SIZE).
 * NOTE(review): presumably consumed when restoring CPU state from a TB —
 * confirm against the restore path elsewhere in this file. */
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

b5ff1b31
FB
/* In user-mode emulation everything executes as user; in system mode
 * the privilege level comes from the DisasContext. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
59
3407ad0e 60TCGv_ptr cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
66c374de 64static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
65static TCGv_i64 cpu_exclusive_addr;
66static TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
03d05e2d 68static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_info;
70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
/* initialize TCG globals.  Called once at startup; wires each of the
 * file-scope TCG variables above to its CPUARMState field, then lets the
 * AArch64 frontend do the same for its globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
112
39d5492a 113static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 114{
39d5492a 115 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
116 tcg_gen_ld_i32(tmp, cpu_env, offset);
117 return tmp;
118}
119
0ecb72a5 120#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 121
39d5492a 122static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
123{
124 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 125 tcg_temp_free_i32(var);
d9ba4830
PB
126}
127
128#define store_cpu_field(var, name) \
0ecb72a5 129 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 130
b26eefb6 131/* Set a variable to the value of a CPU register. */
39d5492a 132static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
133{
134 if (reg == 15) {
135 uint32_t addr;
b90372ad 136 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
137 if (s->thumb)
138 addr = (long)s->pc + 2;
139 else
140 addr = (long)s->pc + 4;
141 tcg_gen_movi_i32(var, addr);
142 } else {
155c3eac 143 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
144 }
145}
146
/* Create a new temporary and set it to the value of a CPU register.
 * Caller owns (and must free) the returned temp. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
154
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writing r15 clears bit 0 (no interworking here) and
   ends the TB, since the PC has changed. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
166
b26eefb6 167/* Value extensions. */
86831435
PB
168#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
169#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
170#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
171#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
172
1497c961
PB
173#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
174#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 175
b26eefb6 176
39d5492a 177static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 178{
39d5492a 179 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 180 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
181 tcg_temp_free_i32(tmp_mask);
182}
d9ba4830
PB
183/* Set NZCV flags from the high 4 bits of var. */
184#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
185
/* Emit code to raise exception number 'excp' via the exception helper. */
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
193
39d5492a 194static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 195{
39d5492a
PM
196 TCGv_i32 tmp1 = tcg_temp_new_i32();
197 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
198 tcg_gen_ext16s_i32(tmp1, a);
199 tcg_gen_ext16s_i32(tmp2, b);
3670669c 200 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 201 tcg_temp_free_i32(tmp2);
3670669c
PB
202 tcg_gen_sari_i32(a, a, 16);
203 tcg_gen_sari_i32(b, b, 16);
204 tcg_gen_mul_i32(b, b, a);
205 tcg_gen_mov_i32(a, tmp1);
7d1b0095 206 tcg_temp_free_i32(tmp1);
3670669c
PB
207}
208
/* Byteswap each halfword.  var = ((var >> 8) & 0x00ff00ff)
 *                               | ((var << 8) & 0xff00ff00). */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
220
/* Byteswap low halfword and sign extend.  The zero-extend first ensures
 * bswap16 sees only the low 16 bits. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
228
229/* Unsigned bitfield extract. */
39d5492a 230static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
231{
232 if (shift)
233 tcg_gen_shri_i32(var, var, shift);
234 tcg_gen_andi_i32(var, var, mask);
235}
236
/* Signed bitfield extract, in place.  After the arithmetic shift, the
 * mask/xor/sub sequence sign-extends a 'width'-bit field (only needed
 * when the field does not already reach bit 31). */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        /* Standard sign-extension trick: x = (x ^ s) - s. */
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
251
838fa72d 252/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 253static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 254{
838fa72d
AJ
255 TCGv_i64 tmp64 = tcg_temp_new_i64();
256
257 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 258 tcg_temp_free_i32(b);
838fa72d
AJ
259 tcg_gen_shli_i64(tmp64, tmp64, 32);
260 tcg_gen_add_i64(a, tmp64, a);
261
262 tcg_temp_free_i64(tmp64);
263 return a;
264}
265
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;   /* result reuses (and returns) the 'a' temp */
}
279
5e3f878a 280/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 281static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 282{
39d5492a
PM
283 TCGv_i32 lo = tcg_temp_new_i32();
284 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 285 TCGv_i64 ret;
5e3f878a 286
831d7fe8 287 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 288 tcg_temp_free_i32(a);
7d1b0095 289 tcg_temp_free_i32(b);
831d7fe8
RH
290
291 ret = tcg_temp_new_i64();
292 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
293 tcg_temp_free_i32(lo);
294 tcg_temp_free_i32(hi);
831d7fe8
RH
295
296 return ret;
5e3f878a
PB
297}
298
39d5492a 299static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 300{
39d5492a
PM
301 TCGv_i32 lo = tcg_temp_new_i32();
302 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 303 TCGv_i64 ret;
5e3f878a 304
831d7fe8 305 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 306 tcg_temp_free_i32(a);
7d1b0095 307 tcg_temp_free_i32(b);
831d7fe8
RH
308
309 ret = tcg_temp_new_i64();
310 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
311 tcg_temp_free_i32(lo);
312 tcg_temp_free_i32(hi);
831d7fe8
RH
313
314 return ret;
5e3f878a
PB
315}
316
8f01245e 317/* Swap low and high halfwords. */
39d5492a 318static void gen_swap_half(TCGv_i32 var)
8f01245e 319{
39d5492a 320 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
321 tcg_gen_shri_i32(tmp, var, 16);
322 tcg_gen_shli_i32(var, var, 16);
323 tcg_gen_or_i32(var, var, tmp);
7d1b0095 324 tcg_temp_free_i32(tmp);
8f01245e
PB
325}
326
b26eefb6
PB
327/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
328 tmp = (t0 ^ t1) & 0x8000;
329 t0 &= ~0x8000;
330 t1 &= ~0x8000;
331 t0 = (t0 + t1) ^ tmp;
332 */
333
39d5492a 334static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 335{
39d5492a 336 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
337 tcg_gen_xor_i32(tmp, t0, t1);
338 tcg_gen_andi_i32(tmp, tmp, 0x8000);
339 tcg_gen_andi_i32(t0, t0, ~0x8000);
340 tcg_gen_andi_i32(t1, t1, ~0x8000);
341 tcg_gen_add_i32(t0, t0, t1);
342 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
343 tcg_temp_free_i32(tmp);
344 tcg_temp_free_i32(t1);
b26eefb6
PB
345}
346
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
352
/* Set N and Z flags from var.  (Split-flag representation: NF holds the
 * value whose sign is N; ZF is zero iff Z is set... i.e. ZF holds the
 * value itself and is tested against 0.) */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
359
/* T0 += T1 + CF.  Flags are NOT updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
366
e9bb4aa9 367/* dest = T0 + T1 + CF. */
39d5492a 368static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 369{
e9bb4aa9 370 tcg_gen_add_i32(dest, t0, t1);
66c374de 371 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
372}
373
3670669c 374/* dest = T0 - T1 + CF - 1. */
39d5492a 375static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 376{
3670669c 377 tcg_gen_sub_i32(dest, t0, t1);
66c374de 378 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 379 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
380}
381
72485ec4 382/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 383static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 384{
39d5492a 385 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
386 tcg_gen_movi_i32(tmp, 0);
387 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 388 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 389 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
390 tcg_gen_xor_i32(tmp, t0, t1);
391 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
392 tcg_temp_free_i32(tmp);
393 tcg_gen_mov_i32(dest, cpu_NF);
394}
395
49b4c31e 396/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 397static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 398{
39d5492a 399 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
400 if (TCG_TARGET_HAS_add2_i32) {
401 tcg_gen_movi_i32(tmp, 0);
402 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 403 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
404 } else {
405 TCGv_i64 q0 = tcg_temp_new_i64();
406 TCGv_i64 q1 = tcg_temp_new_i64();
407 tcg_gen_extu_i32_i64(q0, t0);
408 tcg_gen_extu_i32_i64(q1, t1);
409 tcg_gen_add_i64(q0, q0, q1);
410 tcg_gen_extu_i32_i64(q1, cpu_CF);
411 tcg_gen_add_i64(q0, q0, q1);
412 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
413 tcg_temp_free_i64(q0);
414 tcg_temp_free_i64(q1);
415 }
416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
422}
423
72485ec4 424/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 425static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 426{
39d5492a 427 TCGv_i32 tmp;
72485ec4
AJ
428 tcg_gen_sub_i32(cpu_NF, t0, t1);
429 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
430 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
431 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
432 tmp = tcg_temp_new_i32();
433 tcg_gen_xor_i32(tmp, t0, t1);
434 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
435 tcg_temp_free_i32(tmp);
436 tcg_gen_mov_i32(dest, cpu_NF);
437}
438
e77f0832 439/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 440static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 441{
39d5492a 442 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
443 tcg_gen_not_i32(tmp, t1);
444 gen_adc_CC(dest, t0, tmp);
39d5492a 445 tcg_temp_free_i32(tmp);
2de68a49
RH
446}
447
365af80e 448#define GEN_SHIFT(name) \
39d5492a 449static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 450{ \
39d5492a 451 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
452 tmp1 = tcg_temp_new_i32(); \
453 tcg_gen_andi_i32(tmp1, t1, 0xff); \
454 tmp2 = tcg_const_i32(0); \
455 tmp3 = tcg_const_i32(0x1f); \
456 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
457 tcg_temp_free_i32(tmp3); \
458 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
459 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
460 tcg_temp_free_i32(tmp2); \
461 tcg_temp_free_i32(tmp1); \
462}
463GEN_SHIFT(shl)
464GEN_SHIFT(shr)
465#undef GEN_SHIFT
466
39d5492a 467static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 468{
39d5492a 469 TCGv_i32 tmp1, tmp2;
365af80e
AJ
470 tmp1 = tcg_temp_new_i32();
471 tcg_gen_andi_i32(tmp1, t1, 0xff);
472 tmp2 = tcg_const_i32(0x1f);
473 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
474 tcg_temp_free_i32(tmp2);
475 tcg_gen_sar_i32(dest, t0, tmp1);
476 tcg_temp_free_i32(tmp1);
477}
478
39d5492a 479static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 480{
39d5492a
PM
481 TCGv_i32 c0 = tcg_const_i32(0);
482 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
483 tcg_gen_neg_i32(tmp, src);
484 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
485 tcg_temp_free_i32(c0);
486 tcg_temp_free_i32(tmp);
487}
ad69471c 488
39d5492a 489static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 490{
9a119ff6 491 if (shift == 0) {
66c374de 492 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 493 } else {
66c374de
AJ
494 tcg_gen_shri_i32(cpu_CF, var, shift);
495 if (shift != 31) {
496 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
497 }
9a119ff6 498 }
9a119ff6 499}
b26eefb6 500
9a119ff6 501/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
502static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
503 int shift, int flags)
9a119ff6
PB
504{
505 switch (shiftop) {
506 case 0: /* LSL */
507 if (shift != 0) {
508 if (flags)
509 shifter_out_im(var, 32 - shift);
510 tcg_gen_shli_i32(var, var, shift);
511 }
512 break;
513 case 1: /* LSR */
514 if (shift == 0) {
515 if (flags) {
66c374de 516 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
517 }
518 tcg_gen_movi_i32(var, 0);
519 } else {
520 if (flags)
521 shifter_out_im(var, shift - 1);
522 tcg_gen_shri_i32(var, var, shift);
523 }
524 break;
525 case 2: /* ASR */
526 if (shift == 0)
527 shift = 32;
528 if (flags)
529 shifter_out_im(var, shift - 1);
530 if (shift == 32)
531 shift = 31;
532 tcg_gen_sari_i32(var, var, shift);
533 break;
534 case 3: /* ROR/RRX */
535 if (shift != 0) {
536 if (flags)
537 shifter_out_im(var, shift - 1);
f669df27 538 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 539 } else {
39d5492a 540 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 541 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
542 if (flags)
543 shifter_out_im(var, 0);
544 tcg_gen_shri_i32(var, var, 1);
b26eefb6 545 tcg_gen_or_i32(var, var, tmp);
7d1b0095 546 tcg_temp_free_i32(tmp);
b26eefb6
PB
547 }
548 }
549};
550
39d5492a
PM
551static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
552 TCGv_i32 shift, int flags)
8984bd2e
PB
553{
554 if (flags) {
555 switch (shiftop) {
9ef39277
BS
556 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
557 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
558 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
559 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
560 }
561 } else {
562 switch (shiftop) {
365af80e
AJ
563 case 0:
564 gen_shl(var, var, shift);
565 break;
566 case 1:
567 gen_shr(var, var, shift);
568 break;
569 case 2:
570 gen_sar(var, var, shift);
571 break;
f669df27
AJ
572 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
573 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
574 }
575 }
7d1b0095 576 tcg_temp_free_i32(shift);
8984bd2e
PB
577}
578
6ddbc6e4
PB
/* Dispatch on op2 to the parallel add/sub helper for a given prefix
 * (s/u: saturating GE-flag variants need the GE pointer; q/sh/uq/uh
 * variants take only the operands). */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* ARM-encoding parallel add/subtract: op1 selects signedness family,
 * op2 the operation.  Result replaces 'a'. */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* signed, sets GE bits: helper needs a pointer to env->GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
624
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Thumb-2 encoding parallel add/subtract: note op1/op2 roles are swapped
 * relative to gen_arm_parallel_addsub.  Result replaces 'a'. */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        /* signed, sets GE bits: helper needs a pointer to env->GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
671
39fb730a
AG
/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Branches to 'label' when the condition holds, using the split
 * NF/ZF/CF/VF representation (N = sign of NF, Z = ZF == 0, C = CF,
 * V = sign of VF).
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        /* Invert: skip over the Z test when C is clear. */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        /* 0xe (always) and 0xf are handled by the caller/decoder. */
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
2c0262af 749
b1d8e52e 750static const uint8_t table_logic_cc[16] = {
2c0262af
FB
751 1, /* and */
752 1, /* xor */
753 0, /* sub */
754 0, /* rsb */
755 0, /* add */
756 0, /* adc */
757 0, /* sbc */
758 0, /* rsc */
759 1, /* andl */
760 1, /* xorl */
761 0, /* cmp */
762 0, /* cmn */
763 1, /* orr */
764 1, /* mov */
765 1, /* bic */
766 1, /* mvn */
767};
3b46e624 768
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.  Bit 0 of the
 * address selects the Thumb state; the env->thumb store is emitted only
 * when the state actually changes. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
783
/* Set PC and Thumb state from var.  var is marked as dead.
 * Interworking branch: bit 0 of var becomes the Thumb state. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);   /* consumes var */
}
792
21aeb343
JR
793/* Variant of store_reg which uses branch&exchange logic when storing
794 to r15 in ARM architecture v7 and above. The source must be a temporary
795 and will be marked as dead. */
0ecb72a5 796static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 797 int reg, TCGv_i32 var)
21aeb343
JR
798{
799 if (reg == 15 && ENABLE_ARCH_7) {
800 gen_bx(s, var);
801 } else {
802 store_reg(s, reg, var);
803 }
804}
805
be5e7a76
DES
806/* Variant of store_reg which uses branch&exchange logic when storing
807 * to r15 in ARM architecture v5T and above. This is used for storing
808 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
809 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 810static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 811 int reg, TCGv_i32 var)
be5e7a76
DES
812{
813 if (reg == 15 && ENABLE_ARCH_5) {
814 gen_bx(s, var);
815 } else {
816 store_reg(s, reg, var);
817 }
818}
819
08307563
PM
820/* Abstractions of "generate code to do a guest load/store for
821 * AArch32", where a vaddr is always 32 bits (and is zero
822 * extended if we're a 64 bit core) and data is also
823 * 32 bits unless specifically doing a 64 bit access.
824 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 825 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
826 */
827#if TARGET_LONG_BITS == 32
828
09f78135
RH
829#define DO_GEN_LD(SUFF, OPC) \
830static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 831{ \
09f78135 832 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
833}
834
09f78135
RH
835#define DO_GEN_ST(SUFF, OPC) \
836static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 837{ \
09f78135 838 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
839}
840
841static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
842{
09f78135 843 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
844}
845
846static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
847{
09f78135 848 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
849}
850
851#else
852
09f78135
RH
853#define DO_GEN_LD(SUFF, OPC) \
854static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
855{ \
856 TCGv addr64 = tcg_temp_new(); \
08307563 857 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 858 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 859 tcg_temp_free(addr64); \
08307563
PM
860}
861
09f78135
RH
862#define DO_GEN_ST(SUFF, OPC) \
863static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
864{ \
865 TCGv addr64 = tcg_temp_new(); \
08307563 866 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 867 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 868 tcg_temp_free(addr64); \
08307563
PM
869}
870
871static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
872{
873 TCGv addr64 = tcg_temp_new();
874 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 875 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
876 tcg_temp_free(addr64);
877}
878
879static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
880{
881 TCGv addr64 = tcg_temp_new();
882 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 883 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
884 tcg_temp_free(addr64);
885}
886
887#endif
888
09f78135
RH
889DO_GEN_LD(8s, MO_SB)
890DO_GEN_LD(8u, MO_UB)
891DO_GEN_LD(16s, MO_TESW)
892DO_GEN_LD(16u, MO_TEUW)
893DO_GEN_LD(32u, MO_TEUL)
894DO_GEN_ST(8, MO_UB)
895DO_GEN_ST(16, MO_TEUW)
896DO_GEN_ST(32, MO_TEUL)
08307563 897
eaed129d 898static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 899{
40f860cd 900 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
901}
902
b5ff1b31
FB
903/* Force a TB lookup after an instruction that changes the CPU state. */
904static inline void gen_lookup_tb(DisasContext *s)
905{
a6445c52 906 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
907 s->is_jmp = DISAS_UPDATE;
908}
909
b0109805 910static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 911 TCGv_i32 var)
2c0262af 912{
1e8d4eec 913 int val, rm, shift, shiftop;
39d5492a 914 TCGv_i32 offset;
2c0262af
FB
915
916 if (!(insn & (1 << 25))) {
917 /* immediate */
918 val = insn & 0xfff;
919 if (!(insn & (1 << 23)))
920 val = -val;
537730b9 921 if (val != 0)
b0109805 922 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
923 } else {
924 /* shift/register */
925 rm = (insn) & 0xf;
926 shift = (insn >> 7) & 0x1f;
1e8d4eec 927 shiftop = (insn >> 5) & 3;
b26eefb6 928 offset = load_reg(s, rm);
9a119ff6 929 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 930 if (!(insn & (1 << 23)))
b0109805 931 tcg_gen_sub_i32(var, var, offset);
2c0262af 932 else
b0109805 933 tcg_gen_add_i32(var, var, offset);
7d1b0095 934 tcg_temp_free_i32(offset);
2c0262af
FB
935 }
936}
937
191f9a93 938static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 939 int extra, TCGv_i32 var)
2c0262af
FB
940{
941 int val, rm;
39d5492a 942 TCGv_i32 offset;
3b46e624 943
2c0262af
FB
944 if (insn & (1 << 22)) {
945 /* immediate */
946 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
947 if (!(insn & (1 << 23)))
948 val = -val;
18acad92 949 val += extra;
537730b9 950 if (val != 0)
b0109805 951 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
952 } else {
953 /* register */
191f9a93 954 if (extra)
b0109805 955 tcg_gen_addi_i32(var, var, extra);
2c0262af 956 rm = (insn) & 0xf;
b26eefb6 957 offset = load_reg(s, rm);
2c0262af 958 if (!(insn & (1 << 23)))
b0109805 959 tcg_gen_sub_i32(var, var, offset);
2c0262af 960 else
b0109805 961 tcg_gen_add_i32(var, var, offset);
7d1b0095 962 tcg_temp_free_i32(offset);
2c0262af
FB
963 }
964}
965
5aaebd13
PM
966static TCGv_ptr get_fpstatus_ptr(int neon)
967{
968 TCGv_ptr statusptr = tcg_temp_new_ptr();
969 int offset;
970 if (neon) {
0ecb72a5 971 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 972 } else {
0ecb72a5 973 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
974 }
975 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
976 return statusptr;
977}
978
4373f3ce
PB
/* Emit a two-operand VFP arithmetic op: F0 = F0 <op> F1, selecting the
   double ("d") or single ("s") helper by dp, with the normal VFP
   float_status (not the Neon standard one). */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
997
605a6aed
PM
998static inline void gen_vfp_F1_mul(int dp)
999{
1000 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1001 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1002 if (dp) {
ae1857ec 1003 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1004 } else {
ae1857ec 1005 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1006 }
ae1857ec 1007 tcg_temp_free_ptr(fpst);
605a6aed
PM
1008}
1009
1010static inline void gen_vfp_F1_neg(int dp)
1011{
1012 /* Like gen_vfp_neg() but put result in F1 */
1013 if (dp) {
1014 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1015 } else {
1016 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1017 }
1018}
1019
4373f3ce
PB
1020static inline void gen_vfp_abs(int dp)
1021{
1022 if (dp)
1023 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1024 else
1025 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1026}
1027
1028static inline void gen_vfp_neg(int dp)
1029{
1030 if (dp)
1031 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1032 else
1033 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1034}
1035
1036static inline void gen_vfp_sqrt(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1040 else
1041 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1042}
1043
1044static inline void gen_vfp_cmp(int dp)
1045{
1046 if (dp)
1047 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1048 else
1049 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1050}
1051
1052static inline void gen_vfp_cmpe(int dp)
1053{
1054 if (dp)
1055 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1056 else
1057 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1058}
1059
1060static inline void gen_vfp_F1_ld0(int dp)
1061{
1062 if (dp)
5b340b51 1063 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1064 else
5b340b51 1065 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1066}
1067
5500b06c
PM
/* Emit an integer->float conversion on F0 (uito: unsigned, sito: signed);
   "neon" selects the Neon standard float_status.  Note the double-result
   variant reads its 32-bit integer source from cpu_F0s. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1083
5500b06c
PM
/* Emit a float->integer conversion; the 32-bit integer result always
   lands in cpu_F0s, even for the double-input "d" variants.  The "z"
   forms are the round-toward-zero flavours. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1101
/* Emit a fixed-point <-> float conversion on F0 with "shift" fraction
   bits.  "round" is pasted into the helper name: _round_to_zero for the
   float->fixed direction, empty for fixed->float. */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1126
39d5492a 1127static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1128{
08307563
PM
1129 if (dp) {
1130 gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
1131 } else {
1132 gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
1133 }
b5ff1b31
FB
1134}
1135
39d5492a 1136static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1137{
08307563
PM
1138 if (dp) {
1139 gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
1140 } else {
1141 gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
1142 }
b5ff1b31
FB
1143}
1144
8e96005d
FB
1145static inline long
1146vfp_reg_offset (int dp, int reg)
1147{
1148 if (dp)
1149 return offsetof(CPUARMState, vfp.regs[reg]);
1150 else if (reg & 1) {
1151 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1152 + offsetof(CPU_DoubleU, l.upper);
1153 } else {
1154 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1155 + offsetof(CPU_DoubleU, l.lower);
1156 }
1157}
9ee6e8bb
PB
1158
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1168
39d5492a 1169static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1170{
39d5492a 1171 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1172 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1173 return tmp;
1174}
1175
/* Write "var" to 32-bit lane "pass" of NEON register "reg".
   Consumes (frees) var. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1181
/* Load the whole 64-bit double register "reg" into temp "var". */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1186
/* Store temp "var" into the whole 64-bit double register "reg". */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1191
4373f3ce
PB
/* "f32"/"f64" env accesses are plain i32/i64 loads and stores. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1196
b7bcbe95
FB
1197static inline void gen_mov_F0_vreg(int dp, int reg)
1198{
1199 if (dp)
4373f3ce 1200 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1201 else
4373f3ce 1202 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1203}
1204
1205static inline void gen_mov_F1_vreg(int dp, int reg)
1206{
1207 if (dp)
4373f3ce 1208 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1209 else
4373f3ce 1210 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1211}
1212
1213static inline void gen_mov_vreg_F0(int dp, int reg)
1214{
1215 if (dp)
4373f3ce 1216 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1217 else
4373f3ce 1218 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1219}
1220
18c9b560
AZ
/* Coprocessor load/store "L" bit: set for transfers that read from the
   coprocessor (MRC/MRRC/loads). */
#define ARM_CP_RW_BIT (1 << 20)

/* Copy iwMMXt data register wRn into 64-bit temp "var". */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1227
/* Copy 64-bit temp "var" into iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1232
39d5492a 1233static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1234{
39d5492a 1235 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1236 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1237 return var;
e677137d
PB
1238}
1239
/* Write "var" to iwMMXt control register cregs[reg]; consumes var. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1245
/* wRn = M0 (write back the 64-bit working value). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1250
/* M0 = wRn (fetch a data register into the 64-bit working value). */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1255
/* M0 |= wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1261
/* M0 &= wRn. */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1267
/* M0 ^= wRn. */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1273
/* M0 = M0 <op> wRn via a helper that needs no CPU state access. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (it touches
   iwMMXt condition/saturation state). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the env-taking op for byte/word/long element sizes. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env-taking op: M0 = op(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1298
/* Instantiate the gen_op_iwmmxt_*_M0_wRn / _M0 wrappers for every
   iwMMXt ALU helper used by the decoder below. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1355
e677137d
PB
1356static void gen_op_iwmmxt_set_mup(void)
1357{
39d5492a 1358 TCGv_i32 tmp;
e677137d
PB
1359 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1360 tcg_gen_ori_i32(tmp, tmp, 2);
1361 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1362}
1363
1364static void gen_op_iwmmxt_set_cup(void)
1365{
39d5492a 1366 TCGv_i32 tmp;
e677137d
PB
1367 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1368 tcg_gen_ori_i32(tmp, tmp, 1);
1369 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1370}
1371
/* Derive N/Z flags from cpu_M0 (via helper) and store them into wCASF. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1378
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1385
39d5492a
PM
/* Compute the effective address of an iwMMXt load/store into "dest",
   handling the pre-indexed and post-indexed forms with optional base
   register writeback.  Returns nonzero for an unhandled encoding. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;            /* base register */
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when insn bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))           /* writeback */
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    /* NOTE(review): in the remaining case (bits 24 and 21 clear, bit 23
       set) "dest" is never written and "tmp" is never freed — verify
       whether that encoding can reach a caller that uses "dest". */
    return 0;
}
1420
39d5492a 1421static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1422{
1423 int rd = (insn >> 0) & 0xf;
39d5492a 1424 TCGv_i32 tmp;
18c9b560 1425
da6b5335
FN
1426 if (insn & (1 << 8)) {
1427 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1428 return 1;
da6b5335
FN
1429 } else {
1430 tmp = iwmmxt_load_creg(rd);
1431 }
1432 } else {
7d1b0095 1433 tmp = tcg_temp_new_i32();
da6b5335
FN
1434 iwmmxt_load_reg(cpu_V0, rd);
1435 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1436 }
1437 tcg_gen_andi_i32(tmp, tmp, mask);
1438 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1439 tcg_temp_free_i32(tmp);
18c9b560
AZ
1440 return 0;
1441}
1442
a1c7273b 1443/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1444 (ie. an undefined instruction). */
0ecb72a5 1445static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1446{
1447 int rd, wrd;
1448 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1449 TCGv_i32 addr;
1450 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1451
1452 if ((insn & 0x0e000e00) == 0x0c000000) {
1453 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1454 wrd = insn & 0xf;
1455 rdlo = (insn >> 12) & 0xf;
1456 rdhi = (insn >> 16) & 0xf;
1457 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1458 iwmmxt_load_reg(cpu_V0, wrd);
1459 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1460 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1461 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1462 } else { /* TMCRR */
da6b5335
FN
1463 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1464 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1465 gen_op_iwmmxt_set_mup();
1466 }
1467 return 0;
1468 }
1469
1470 wrd = (insn >> 12) & 0xf;
7d1b0095 1471 addr = tcg_temp_new_i32();
da6b5335 1472 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1473 tcg_temp_free_i32(addr);
18c9b560 1474 return 1;
da6b5335 1475 }
18c9b560
AZ
1476 if (insn & ARM_CP_RW_BIT) {
1477 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1478 tmp = tcg_temp_new_i32();
08307563 1479 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1480 iwmmxt_store_creg(wrd, tmp);
18c9b560 1481 } else {
e677137d
PB
1482 i = 1;
1483 if (insn & (1 << 8)) {
1484 if (insn & (1 << 22)) { /* WLDRD */
08307563 1485 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1486 i = 0;
1487 } else { /* WLDRW wRd */
29531141 1488 tmp = tcg_temp_new_i32();
08307563 1489 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1490 }
1491 } else {
29531141 1492 tmp = tcg_temp_new_i32();
e677137d 1493 if (insn & (1 << 22)) { /* WLDRH */
08307563 1494 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1495 } else { /* WLDRB */
08307563 1496 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1497 }
1498 }
1499 if (i) {
1500 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1501 tcg_temp_free_i32(tmp);
e677137d 1502 }
18c9b560
AZ
1503 gen_op_iwmmxt_movq_wRn_M0(wrd);
1504 }
1505 } else {
1506 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1507 tmp = iwmmxt_load_creg(wrd);
08307563 1508 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1509 } else {
1510 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1511 tmp = tcg_temp_new_i32();
e677137d
PB
1512 if (insn & (1 << 8)) {
1513 if (insn & (1 << 22)) { /* WSTRD */
08307563 1514 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1515 } else { /* WSTRW wRd */
1516 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1517 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1518 }
1519 } else {
1520 if (insn & (1 << 22)) { /* WSTRH */
1521 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1522 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1523 } else { /* WSTRB */
1524 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1525 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1526 }
1527 }
18c9b560 1528 }
29531141 1529 tcg_temp_free_i32(tmp);
18c9b560 1530 }
7d1b0095 1531 tcg_temp_free_i32(addr);
18c9b560
AZ
1532 return 0;
1533 }
1534
1535 if ((insn & 0x0f000000) != 0x0e000000)
1536 return 1;
1537
1538 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1539 case 0x000: /* WOR */
1540 wrd = (insn >> 12) & 0xf;
1541 rd0 = (insn >> 0) & 0xf;
1542 rd1 = (insn >> 16) & 0xf;
1543 gen_op_iwmmxt_movq_M0_wRn(rd0);
1544 gen_op_iwmmxt_orq_M0_wRn(rd1);
1545 gen_op_iwmmxt_setpsr_nz();
1546 gen_op_iwmmxt_movq_wRn_M0(wrd);
1547 gen_op_iwmmxt_set_mup();
1548 gen_op_iwmmxt_set_cup();
1549 break;
1550 case 0x011: /* TMCR */
1551 if (insn & 0xf)
1552 return 1;
1553 rd = (insn >> 12) & 0xf;
1554 wrd = (insn >> 16) & 0xf;
1555 switch (wrd) {
1556 case ARM_IWMMXT_wCID:
1557 case ARM_IWMMXT_wCASF:
1558 break;
1559 case ARM_IWMMXT_wCon:
1560 gen_op_iwmmxt_set_cup();
1561 /* Fall through. */
1562 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1563 tmp = iwmmxt_load_creg(wrd);
1564 tmp2 = load_reg(s, rd);
f669df27 1565 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1566 tcg_temp_free_i32(tmp2);
da6b5335 1567 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1568 break;
1569 case ARM_IWMMXT_wCGR0:
1570 case ARM_IWMMXT_wCGR1:
1571 case ARM_IWMMXT_wCGR2:
1572 case ARM_IWMMXT_wCGR3:
1573 gen_op_iwmmxt_set_cup();
da6b5335
FN
1574 tmp = load_reg(s, rd);
1575 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1576 break;
1577 default:
1578 return 1;
1579 }
1580 break;
1581 case 0x100: /* WXOR */
1582 wrd = (insn >> 12) & 0xf;
1583 rd0 = (insn >> 0) & 0xf;
1584 rd1 = (insn >> 16) & 0xf;
1585 gen_op_iwmmxt_movq_M0_wRn(rd0);
1586 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1587 gen_op_iwmmxt_setpsr_nz();
1588 gen_op_iwmmxt_movq_wRn_M0(wrd);
1589 gen_op_iwmmxt_set_mup();
1590 gen_op_iwmmxt_set_cup();
1591 break;
1592 case 0x111: /* TMRC */
1593 if (insn & 0xf)
1594 return 1;
1595 rd = (insn >> 12) & 0xf;
1596 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1597 tmp = iwmmxt_load_creg(wrd);
1598 store_reg(s, rd, tmp);
18c9b560
AZ
1599 break;
1600 case 0x300: /* WANDN */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 0) & 0xf;
1603 rd1 = (insn >> 16) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1605 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1606 gen_op_iwmmxt_andq_M0_wRn(rd1);
1607 gen_op_iwmmxt_setpsr_nz();
1608 gen_op_iwmmxt_movq_wRn_M0(wrd);
1609 gen_op_iwmmxt_set_mup();
1610 gen_op_iwmmxt_set_cup();
1611 break;
1612 case 0x200: /* WAND */
1613 wrd = (insn >> 12) & 0xf;
1614 rd0 = (insn >> 0) & 0xf;
1615 rd1 = (insn >> 16) & 0xf;
1616 gen_op_iwmmxt_movq_M0_wRn(rd0);
1617 gen_op_iwmmxt_andq_M0_wRn(rd1);
1618 gen_op_iwmmxt_setpsr_nz();
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 gen_op_iwmmxt_set_cup();
1622 break;
1623 case 0x810: case 0xa10: /* WMADD */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
1628 if (insn & (1 << 21))
1629 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1630 else
1631 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1632 gen_op_iwmmxt_movq_wRn_M0(wrd);
1633 gen_op_iwmmxt_set_mup();
1634 break;
1635 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1636 wrd = (insn >> 12) & 0xf;
1637 rd0 = (insn >> 16) & 0xf;
1638 rd1 = (insn >> 0) & 0xf;
1639 gen_op_iwmmxt_movq_M0_wRn(rd0);
1640 switch ((insn >> 22) & 3) {
1641 case 0:
1642 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1643 break;
1644 case 1:
1645 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1646 break;
1647 case 2:
1648 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1649 break;
1650 case 3:
1651 return 1;
1652 }
1653 gen_op_iwmmxt_movq_wRn_M0(wrd);
1654 gen_op_iwmmxt_set_mup();
1655 gen_op_iwmmxt_set_cup();
1656 break;
1657 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1658 wrd = (insn >> 12) & 0xf;
1659 rd0 = (insn >> 16) & 0xf;
1660 rd1 = (insn >> 0) & 0xf;
1661 gen_op_iwmmxt_movq_M0_wRn(rd0);
1662 switch ((insn >> 22) & 3) {
1663 case 0:
1664 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1665 break;
1666 case 1:
1667 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1668 break;
1669 case 2:
1670 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1671 break;
1672 case 3:
1673 return 1;
1674 }
1675 gen_op_iwmmxt_movq_wRn_M0(wrd);
1676 gen_op_iwmmxt_set_mup();
1677 gen_op_iwmmxt_set_cup();
1678 break;
1679 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1680 wrd = (insn >> 12) & 0xf;
1681 rd0 = (insn >> 16) & 0xf;
1682 rd1 = (insn >> 0) & 0xf;
1683 gen_op_iwmmxt_movq_M0_wRn(rd0);
1684 if (insn & (1 << 22))
1685 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1686 else
1687 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1688 if (!(insn & (1 << 20)))
1689 gen_op_iwmmxt_addl_M0_wRn(wrd);
1690 gen_op_iwmmxt_movq_wRn_M0(wrd);
1691 gen_op_iwmmxt_set_mup();
1692 break;
1693 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1698 if (insn & (1 << 21)) {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1703 } else {
1704 if (insn & (1 << 20))
1705 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1706 else
1707 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1708 }
18c9b560
AZ
1709 gen_op_iwmmxt_movq_wRn_M0(wrd);
1710 gen_op_iwmmxt_set_mup();
1711 break;
1712 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 21))
1718 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1719 else
1720 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1721 if (!(insn & (1 << 20))) {
e677137d
PB
1722 iwmmxt_load_reg(cpu_V1, wrd);
1723 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1724 }
1725 gen_op_iwmmxt_movq_wRn_M0(wrd);
1726 gen_op_iwmmxt_set_mup();
1727 break;
1728 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1729 wrd = (insn >> 12) & 0xf;
1730 rd0 = (insn >> 16) & 0xf;
1731 rd1 = (insn >> 0) & 0xf;
1732 gen_op_iwmmxt_movq_M0_wRn(rd0);
1733 switch ((insn >> 22) & 3) {
1734 case 0:
1735 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1736 break;
1737 case 1:
1738 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1739 break;
1740 case 2:
1741 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1742 break;
1743 case 3:
1744 return 1;
1745 }
1746 gen_op_iwmmxt_movq_wRn_M0(wrd);
1747 gen_op_iwmmxt_set_mup();
1748 gen_op_iwmmxt_set_cup();
1749 break;
1750 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 16) & 0xf;
1753 rd1 = (insn >> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1755 if (insn & (1 << 22)) {
1756 if (insn & (1 << 20))
1757 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1758 else
1759 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1760 } else {
1761 if (insn & (1 << 20))
1762 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1763 else
1764 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1765 }
18c9b560
AZ
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1767 gen_op_iwmmxt_set_mup();
1768 gen_op_iwmmxt_set_cup();
1769 break;
1770 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1771 wrd = (insn >> 12) & 0xf;
1772 rd0 = (insn >> 16) & 0xf;
1773 rd1 = (insn >> 0) & 0xf;
1774 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1775 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1776 tcg_gen_andi_i32(tmp, tmp, 7);
1777 iwmmxt_load_reg(cpu_V1, rd1);
1778 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1779 tcg_temp_free_i32(tmp);
18c9b560
AZ
1780 gen_op_iwmmxt_movq_wRn_M0(wrd);
1781 gen_op_iwmmxt_set_mup();
1782 break;
1783 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1784 if (((insn >> 6) & 3) == 3)
1785 return 1;
18c9b560
AZ
1786 rd = (insn >> 12) & 0xf;
1787 wrd = (insn >> 16) & 0xf;
da6b5335 1788 tmp = load_reg(s, rd);
18c9b560
AZ
1789 gen_op_iwmmxt_movq_M0_wRn(wrd);
1790 switch ((insn >> 6) & 3) {
1791 case 0:
da6b5335
FN
1792 tmp2 = tcg_const_i32(0xff);
1793 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1794 break;
1795 case 1:
da6b5335
FN
1796 tmp2 = tcg_const_i32(0xffff);
1797 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1798 break;
1799 case 2:
da6b5335
FN
1800 tmp2 = tcg_const_i32(0xffffffff);
1801 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1802 break;
da6b5335 1803 default:
39d5492a
PM
1804 TCGV_UNUSED_I32(tmp2);
1805 TCGV_UNUSED_I32(tmp3);
18c9b560 1806 }
da6b5335 1807 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1808 tcg_temp_free_i32(tmp3);
1809 tcg_temp_free_i32(tmp2);
7d1b0095 1810 tcg_temp_free_i32(tmp);
18c9b560
AZ
1811 gen_op_iwmmxt_movq_wRn_M0(wrd);
1812 gen_op_iwmmxt_set_mup();
1813 break;
1814 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1815 rd = (insn >> 12) & 0xf;
1816 wrd = (insn >> 16) & 0xf;
da6b5335 1817 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1818 return 1;
1819 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1820 tmp = tcg_temp_new_i32();
18c9b560
AZ
1821 switch ((insn >> 22) & 3) {
1822 case 0:
da6b5335
FN
1823 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1824 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1825 if (insn & 8) {
1826 tcg_gen_ext8s_i32(tmp, tmp);
1827 } else {
1828 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1829 }
1830 break;
1831 case 1:
da6b5335
FN
1832 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1833 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1834 if (insn & 8) {
1835 tcg_gen_ext16s_i32(tmp, tmp);
1836 } else {
1837 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1838 }
1839 break;
1840 case 2:
da6b5335
FN
1841 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1842 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1843 break;
18c9b560 1844 }
da6b5335 1845 store_reg(s, rd, tmp);
18c9b560
AZ
1846 break;
1847 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1848 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1849 return 1;
da6b5335 1850 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1851 switch ((insn >> 22) & 3) {
1852 case 0:
da6b5335 1853 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1854 break;
1855 case 1:
da6b5335 1856 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1857 break;
1858 case 2:
da6b5335 1859 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1860 break;
18c9b560 1861 }
da6b5335
FN
1862 tcg_gen_shli_i32(tmp, tmp, 28);
1863 gen_set_nzcv(tmp);
7d1b0095 1864 tcg_temp_free_i32(tmp);
18c9b560
AZ
1865 break;
1866 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1867 if (((insn >> 6) & 3) == 3)
1868 return 1;
18c9b560
AZ
1869 rd = (insn >> 12) & 0xf;
1870 wrd = (insn >> 16) & 0xf;
da6b5335 1871 tmp = load_reg(s, rd);
18c9b560
AZ
1872 switch ((insn >> 6) & 3) {
1873 case 0:
da6b5335 1874 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1875 break;
1876 case 1:
da6b5335 1877 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1878 break;
1879 case 2:
da6b5335 1880 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1881 break;
18c9b560 1882 }
7d1b0095 1883 tcg_temp_free_i32(tmp);
18c9b560
AZ
1884 gen_op_iwmmxt_movq_wRn_M0(wrd);
1885 gen_op_iwmmxt_set_mup();
1886 break;
1887 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1888 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1889 return 1;
da6b5335 1890 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1891 tmp2 = tcg_temp_new_i32();
da6b5335 1892 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1893 switch ((insn >> 22) & 3) {
1894 case 0:
1895 for (i = 0; i < 7; i ++) {
da6b5335
FN
1896 tcg_gen_shli_i32(tmp2, tmp2, 4);
1897 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1898 }
1899 break;
1900 case 1:
1901 for (i = 0; i < 3; i ++) {
da6b5335
FN
1902 tcg_gen_shli_i32(tmp2, tmp2, 8);
1903 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1904 }
1905 break;
1906 case 2:
da6b5335
FN
1907 tcg_gen_shli_i32(tmp2, tmp2, 16);
1908 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1909 break;
18c9b560 1910 }
da6b5335 1911 gen_set_nzcv(tmp);
7d1b0095
PM
1912 tcg_temp_free_i32(tmp2);
1913 tcg_temp_free_i32(tmp);
18c9b560
AZ
1914 break;
1915 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1916 wrd = (insn >> 12) & 0xf;
1917 rd0 = (insn >> 16) & 0xf;
1918 gen_op_iwmmxt_movq_M0_wRn(rd0);
1919 switch ((insn >> 22) & 3) {
1920 case 0:
e677137d 1921 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1922 break;
1923 case 1:
e677137d 1924 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1925 break;
1926 case 2:
e677137d 1927 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1928 break;
1929 case 3:
1930 return 1;
1931 }
1932 gen_op_iwmmxt_movq_wRn_M0(wrd);
1933 gen_op_iwmmxt_set_mup();
1934 break;
1935 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1936 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1937 return 1;
da6b5335 1938 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1939 tmp2 = tcg_temp_new_i32();
da6b5335 1940 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1941 switch ((insn >> 22) & 3) {
1942 case 0:
1943 for (i = 0; i < 7; i ++) {
da6b5335
FN
1944 tcg_gen_shli_i32(tmp2, tmp2, 4);
1945 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1946 }
1947 break;
1948 case 1:
1949 for (i = 0; i < 3; i ++) {
da6b5335
FN
1950 tcg_gen_shli_i32(tmp2, tmp2, 8);
1951 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1952 }
1953 break;
1954 case 2:
da6b5335
FN
1955 tcg_gen_shli_i32(tmp2, tmp2, 16);
1956 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1957 break;
18c9b560 1958 }
da6b5335 1959 gen_set_nzcv(tmp);
7d1b0095
PM
1960 tcg_temp_free_i32(tmp2);
1961 tcg_temp_free_i32(tmp);
18c9b560
AZ
1962 break;
1963 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1964 rd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 16) & 0xf;
da6b5335 1966 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1967 return 1;
1968 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1969 tmp = tcg_temp_new_i32();
18c9b560
AZ
1970 switch ((insn >> 22) & 3) {
1971 case 0:
da6b5335 1972 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1973 break;
1974 case 1:
da6b5335 1975 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1976 break;
1977 case 2:
da6b5335 1978 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1979 break;
18c9b560 1980 }
da6b5335 1981 store_reg(s, rd, tmp);
18c9b560
AZ
1982 break;
1983 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1984 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1985 wrd = (insn >> 12) & 0xf;
1986 rd0 = (insn >> 16) & 0xf;
1987 rd1 = (insn >> 0) & 0xf;
1988 gen_op_iwmmxt_movq_M0_wRn(rd0);
1989 switch ((insn >> 22) & 3) {
1990 case 0:
1991 if (insn & (1 << 21))
1992 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1995 break;
1996 case 1:
1997 if (insn & (1 << 21))
1998 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1999 else
2000 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2001 break;
2002 case 2:
2003 if (insn & (1 << 21))
2004 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2005 else
2006 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2007 break;
2008 case 3:
2009 return 1;
2010 }
2011 gen_op_iwmmxt_movq_wRn_M0(wrd);
2012 gen_op_iwmmxt_set_mup();
2013 gen_op_iwmmxt_set_cup();
2014 break;
2015 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2016 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2017 wrd = (insn >> 12) & 0xf;
2018 rd0 = (insn >> 16) & 0xf;
2019 gen_op_iwmmxt_movq_M0_wRn(rd0);
2020 switch ((insn >> 22) & 3) {
2021 case 0:
2022 if (insn & (1 << 21))
2023 gen_op_iwmmxt_unpacklsb_M0();
2024 else
2025 gen_op_iwmmxt_unpacklub_M0();
2026 break;
2027 case 1:
2028 if (insn & (1 << 21))
2029 gen_op_iwmmxt_unpacklsw_M0();
2030 else
2031 gen_op_iwmmxt_unpackluw_M0();
2032 break;
2033 case 2:
2034 if (insn & (1 << 21))
2035 gen_op_iwmmxt_unpacklsl_M0();
2036 else
2037 gen_op_iwmmxt_unpacklul_M0();
2038 break;
2039 case 3:
2040 return 1;
2041 }
2042 gen_op_iwmmxt_movq_wRn_M0(wrd);
2043 gen_op_iwmmxt_set_mup();
2044 gen_op_iwmmxt_set_cup();
2045 break;
2046 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2047 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 gen_op_iwmmxt_movq_M0_wRn(rd0);
2051 switch ((insn >> 22) & 3) {
2052 case 0:
2053 if (insn & (1 << 21))
2054 gen_op_iwmmxt_unpackhsb_M0();
2055 else
2056 gen_op_iwmmxt_unpackhub_M0();
2057 break;
2058 case 1:
2059 if (insn & (1 << 21))
2060 gen_op_iwmmxt_unpackhsw_M0();
2061 else
2062 gen_op_iwmmxt_unpackhuw_M0();
2063 break;
2064 case 2:
2065 if (insn & (1 << 21))
2066 gen_op_iwmmxt_unpackhsl_M0();
2067 else
2068 gen_op_iwmmxt_unpackhul_M0();
2069 break;
2070 case 3:
2071 return 1;
2072 }
2073 gen_op_iwmmxt_movq_wRn_M0(wrd);
2074 gen_op_iwmmxt_set_mup();
2075 gen_op_iwmmxt_set_cup();
2076 break;
2077 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2078 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2079 if (((insn >> 22) & 3) == 0)
2080 return 1;
18c9b560
AZ
2081 wrd = (insn >> 12) & 0xf;
2082 rd0 = (insn >> 16) & 0xf;
2083 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2084 tmp = tcg_temp_new_i32();
da6b5335 2085 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2086 tcg_temp_free_i32(tmp);
18c9b560 2087 return 1;
da6b5335 2088 }
18c9b560 2089 switch ((insn >> 22) & 3) {
18c9b560 2090 case 1:
477955bd 2091 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2092 break;
2093 case 2:
477955bd 2094 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2095 break;
2096 case 3:
477955bd 2097 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2098 break;
2099 }
7d1b0095 2100 tcg_temp_free_i32(tmp);
18c9b560
AZ
2101 gen_op_iwmmxt_movq_wRn_M0(wrd);
2102 gen_op_iwmmxt_set_mup();
2103 gen_op_iwmmxt_set_cup();
2104 break;
2105 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2106 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2107 if (((insn >> 22) & 3) == 0)
2108 return 1;
18c9b560
AZ
2109 wrd = (insn >> 12) & 0xf;
2110 rd0 = (insn >> 16) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2112 tmp = tcg_temp_new_i32();
da6b5335 2113 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2114 tcg_temp_free_i32(tmp);
18c9b560 2115 return 1;
da6b5335 2116 }
18c9b560 2117 switch ((insn >> 22) & 3) {
18c9b560 2118 case 1:
477955bd 2119 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2120 break;
2121 case 2:
477955bd 2122 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2123 break;
2124 case 3:
477955bd 2125 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2126 break;
2127 }
7d1b0095 2128 tcg_temp_free_i32(tmp);
18c9b560
AZ
2129 gen_op_iwmmxt_movq_wRn_M0(wrd);
2130 gen_op_iwmmxt_set_mup();
2131 gen_op_iwmmxt_set_cup();
2132 break;
2133 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2134 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2135 if (((insn >> 22) & 3) == 0)
2136 return 1;
18c9b560
AZ
2137 wrd = (insn >> 12) & 0xf;
2138 rd0 = (insn >> 16) & 0xf;
2139 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2140 tmp = tcg_temp_new_i32();
da6b5335 2141 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2142 tcg_temp_free_i32(tmp);
18c9b560 2143 return 1;
da6b5335 2144 }
18c9b560 2145 switch ((insn >> 22) & 3) {
18c9b560 2146 case 1:
477955bd 2147 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2148 break;
2149 case 2:
477955bd 2150 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2151 break;
2152 case 3:
477955bd 2153 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2154 break;
2155 }
7d1b0095 2156 tcg_temp_free_i32(tmp);
18c9b560
AZ
2157 gen_op_iwmmxt_movq_wRn_M0(wrd);
2158 gen_op_iwmmxt_set_mup();
2159 gen_op_iwmmxt_set_cup();
2160 break;
2161 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2162 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2163 if (((insn >> 22) & 3) == 0)
2164 return 1;
18c9b560
AZ
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2168 tmp = tcg_temp_new_i32();
18c9b560 2169 switch ((insn >> 22) & 3) {
18c9b560 2170 case 1:
da6b5335 2171 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2172 tcg_temp_free_i32(tmp);
18c9b560 2173 return 1;
da6b5335 2174 }
477955bd 2175 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2176 break;
2177 case 2:
da6b5335 2178 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2179 tcg_temp_free_i32(tmp);
18c9b560 2180 return 1;
da6b5335 2181 }
477955bd 2182 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2183 break;
2184 case 3:
da6b5335 2185 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560 2187 return 1;
da6b5335 2188 }
477955bd 2189 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2190 break;
2191 }
7d1b0095 2192 tcg_temp_free_i32(tmp);
18c9b560
AZ
2193 gen_op_iwmmxt_movq_wRn_M0(wrd);
2194 gen_op_iwmmxt_set_mup();
2195 gen_op_iwmmxt_set_cup();
2196 break;
2197 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2198 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2199 wrd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
2201 rd1 = (insn >> 0) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
2203 switch ((insn >> 22) & 3) {
2204 case 0:
2205 if (insn & (1 << 21))
2206 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2207 else
2208 gen_op_iwmmxt_minub_M0_wRn(rd1);
2209 break;
2210 case 1:
2211 if (insn & (1 << 21))
2212 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2213 else
2214 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2215 break;
2216 case 2:
2217 if (insn & (1 << 21))
2218 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2219 else
2220 gen_op_iwmmxt_minul_M0_wRn(rd1);
2221 break;
2222 case 3:
2223 return 1;
2224 }
2225 gen_op_iwmmxt_movq_wRn_M0(wrd);
2226 gen_op_iwmmxt_set_mup();
2227 break;
2228 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2229 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2230 wrd = (insn >> 12) & 0xf;
2231 rd0 = (insn >> 16) & 0xf;
2232 rd1 = (insn >> 0) & 0xf;
2233 gen_op_iwmmxt_movq_M0_wRn(rd0);
2234 switch ((insn >> 22) & 3) {
2235 case 0:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2240 break;
2241 case 1:
2242 if (insn & (1 << 21))
2243 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2244 else
2245 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2246 break;
2247 case 2:
2248 if (insn & (1 << 21))
2249 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2250 else
2251 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2252 break;
2253 case 3:
2254 return 1;
2255 }
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 break;
2259 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2260 case 0x402: case 0x502: case 0x602: case 0x702:
2261 wrd = (insn >> 12) & 0xf;
2262 rd0 = (insn >> 16) & 0xf;
2263 rd1 = (insn >> 0) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2265 tmp = tcg_const_i32((insn >> 20) & 3);
2266 iwmmxt_load_reg(cpu_V1, rd1);
2267 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2268 tcg_temp_free_i32(tmp);
18c9b560
AZ
2269 gen_op_iwmmxt_movq_wRn_M0(wrd);
2270 gen_op_iwmmxt_set_mup();
2271 break;
2272 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2273 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2274 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2275 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 switch ((insn >> 20) & 0xf) {
2281 case 0x0:
2282 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2283 break;
2284 case 0x1:
2285 gen_op_iwmmxt_subub_M0_wRn(rd1);
2286 break;
2287 case 0x3:
2288 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2289 break;
2290 case 0x4:
2291 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2292 break;
2293 case 0x5:
2294 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2295 break;
2296 case 0x7:
2297 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2298 break;
2299 case 0x8:
2300 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2301 break;
2302 case 0x9:
2303 gen_op_iwmmxt_subul_M0_wRn(rd1);
2304 break;
2305 case 0xb:
2306 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2307 break;
2308 default:
2309 return 1;
2310 }
2311 gen_op_iwmmxt_movq_wRn_M0(wrd);
2312 gen_op_iwmmxt_set_mup();
2313 gen_op_iwmmxt_set_cup();
2314 break;
2315 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2316 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2317 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2318 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2319 wrd = (insn >> 12) & 0xf;
2320 rd0 = (insn >> 16) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2322 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2323 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2324 tcg_temp_free_i32(tmp);
18c9b560
AZ
2325 gen_op_iwmmxt_movq_wRn_M0(wrd);
2326 gen_op_iwmmxt_set_mup();
2327 gen_op_iwmmxt_set_cup();
2328 break;
2329 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2330 case 0x418: case 0x518: case 0x618: case 0x718:
2331 case 0x818: case 0x918: case 0xa18: case 0xb18:
2332 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2333 wrd = (insn >> 12) & 0xf;
2334 rd0 = (insn >> 16) & 0xf;
2335 rd1 = (insn >> 0) & 0xf;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
2337 switch ((insn >> 20) & 0xf) {
2338 case 0x0:
2339 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2340 break;
2341 case 0x1:
2342 gen_op_iwmmxt_addub_M0_wRn(rd1);
2343 break;
2344 case 0x3:
2345 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2346 break;
2347 case 0x4:
2348 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2349 break;
2350 case 0x5:
2351 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2352 break;
2353 case 0x7:
2354 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2355 break;
2356 case 0x8:
2357 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2358 break;
2359 case 0x9:
2360 gen_op_iwmmxt_addul_M0_wRn(rd1);
2361 break;
2362 case 0xb:
2363 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2364 break;
2365 default:
2366 return 1;
2367 }
2368 gen_op_iwmmxt_movq_wRn_M0(wrd);
2369 gen_op_iwmmxt_set_mup();
2370 gen_op_iwmmxt_set_cup();
2371 break;
2372 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2373 case 0x408: case 0x508: case 0x608: case 0x708:
2374 case 0x808: case 0x908: case 0xa08: case 0xb08:
2375 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2376 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2377 return 1;
18c9b560
AZ
2378 wrd = (insn >> 12) & 0xf;
2379 rd0 = (insn >> 16) & 0xf;
2380 rd1 = (insn >> 0) & 0xf;
2381 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2382 switch ((insn >> 22) & 3) {
18c9b560
AZ
2383 case 1:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2386 else
2387 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2388 break;
2389 case 2:
2390 if (insn & (1 << 21))
2391 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2392 else
2393 gen_op_iwmmxt_packul_M0_wRn(rd1);
2394 break;
2395 case 3:
2396 if (insn & (1 << 21))
2397 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2398 else
2399 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2400 break;
2401 }
2402 gen_op_iwmmxt_movq_wRn_M0(wrd);
2403 gen_op_iwmmxt_set_mup();
2404 gen_op_iwmmxt_set_cup();
2405 break;
2406 case 0x201: case 0x203: case 0x205: case 0x207:
2407 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2408 case 0x211: case 0x213: case 0x215: case 0x217:
2409 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2410 wrd = (insn >> 5) & 0xf;
2411 rd0 = (insn >> 12) & 0xf;
2412 rd1 = (insn >> 0) & 0xf;
2413 if (rd0 == 0xf || rd1 == 0xf)
2414 return 1;
2415 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2416 tmp = load_reg(s, rd0);
2417 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2418 switch ((insn >> 16) & 0xf) {
2419 case 0x0: /* TMIA */
da6b5335 2420 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2421 break;
2422 case 0x8: /* TMIAPH */
da6b5335 2423 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2424 break;
2425 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2426 if (insn & (1 << 16))
da6b5335 2427 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2428 if (insn & (1 << 17))
da6b5335
FN
2429 tcg_gen_shri_i32(tmp2, tmp2, 16);
2430 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2431 break;
2432 default:
7d1b0095
PM
2433 tcg_temp_free_i32(tmp2);
2434 tcg_temp_free_i32(tmp);
18c9b560
AZ
2435 return 1;
2436 }
7d1b0095
PM
2437 tcg_temp_free_i32(tmp2);
2438 tcg_temp_free_i32(tmp);
18c9b560
AZ
2439 gen_op_iwmmxt_movq_wRn_M0(wrd);
2440 gen_op_iwmmxt_set_mup();
2441 break;
2442 default:
2443 return 1;
2444 }
2445
2446 return 0;
2447}
2448
a1c7273b 2449/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2450 (ie. an undefined instruction). */
0ecb72a5 2451static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2452{
2453 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2454 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2455
2456 if ((insn & 0x0ff00f10) == 0x0e200010) {
2457 /* Multiply with Internal Accumulate Format */
2458 rd0 = (insn >> 12) & 0xf;
2459 rd1 = insn & 0xf;
2460 acc = (insn >> 5) & 7;
2461
2462 if (acc != 0)
2463 return 1;
2464
3a554c0f
FN
2465 tmp = load_reg(s, rd0);
2466 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2467 switch ((insn >> 16) & 0xf) {
2468 case 0x0: /* MIA */
3a554c0f 2469 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2470 break;
2471 case 0x8: /* MIAPH */
3a554c0f 2472 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2473 break;
2474 case 0xc: /* MIABB */
2475 case 0xd: /* MIABT */
2476 case 0xe: /* MIATB */
2477 case 0xf: /* MIATT */
18c9b560 2478 if (insn & (1 << 16))
3a554c0f 2479 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2480 if (insn & (1 << 17))
3a554c0f
FN
2481 tcg_gen_shri_i32(tmp2, tmp2, 16);
2482 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2483 break;
2484 default:
2485 return 1;
2486 }
7d1b0095
PM
2487 tcg_temp_free_i32(tmp2);
2488 tcg_temp_free_i32(tmp);
18c9b560
AZ
2489
2490 gen_op_iwmmxt_movq_wRn_M0(acc);
2491 return 0;
2492 }
2493
2494 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi = (insn >> 16) & 0xf;
2497 rdlo = (insn >> 12) & 0xf;
2498 acc = insn & 7;
2499
2500 if (acc != 0)
2501 return 1;
2502
2503 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2504 iwmmxt_load_reg(cpu_V0, acc);
2505 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2506 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2507 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2508 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2509 } else { /* MAR */
3a554c0f
FN
2510 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2511 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2512 }
2513 return 0;
2514 }
2515
2516 return 1;
2517}
2518
/* Shift x right by n bits; a negative n means shift left instead. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number from its split 4+1 bit
 * encoding: four bits at 'bigbit' form bits [4:1], one bit at 'smallbit'
 * forms bit 0.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number.  VFP3 has 32 D registers,
 * so the bit at 'smallbit' becomes bit 4 of the register number; on
 * pre-VFP3 cores that bit must be zero, and the macro makes the
 * enclosing decoder function return 1 (UNDEF) if it is set.
 * NOTE: relies on 'env' being in scope at the expansion site.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the three standard operand positions
 * (D = destination, N = first source, M = second source).
 */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2538
4373f3ce 2539/* Move between integer and VFP cores. */
39d5492a 2540static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2541{
39d5492a 2542 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2543 tcg_gen_mov_i32(tmp, cpu_F0s);
2544 return tmp;
2545}
2546
39d5492a 2547static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2548{
2549 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2550 tcg_temp_free_i32(tmp);
4373f3ce
PB
2551}
2552
39d5492a 2553static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2554{
39d5492a 2555 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2556 if (shift)
2557 tcg_gen_shri_i32(var, var, shift);
86831435 2558 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2559 tcg_gen_shli_i32(tmp, var, 8);
2560 tcg_gen_or_i32(var, var, tmp);
2561 tcg_gen_shli_i32(tmp, var, 16);
2562 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2563 tcg_temp_free_i32(tmp);
ad69471c
PB
2564}
2565
39d5492a 2566static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2567{
39d5492a 2568 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2569 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2570 tcg_gen_shli_i32(tmp, var, 16);
2571 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2572 tcg_temp_free_i32(tmp);
ad69471c
PB
2573}
2574
39d5492a 2575static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2576{
39d5492a 2577 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2578 tcg_gen_andi_i32(var, var, 0xffff0000);
2579 tcg_gen_shri_i32(tmp, var, 16);
2580 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2581 tcg_temp_free_i32(tmp);
ad69471c
PB
2582}
2583
39d5492a 2584static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2585{
2586 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2587 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2588 switch (size) {
2589 case 0:
08307563 2590 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2591 gen_neon_dup_u8(tmp, 0);
2592 break;
2593 case 1:
08307563 2594 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2595 gen_neon_dup_low16(tmp);
2596 break;
2597 case 2:
08307563 2598 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2599 break;
2600 default: /* Avoid compiler warnings. */
2601 abort();
2602 }
2603 return tmp;
2604}
2605
/* Handle the v8 VSEL instruction: conditionally select between two VFP
 * registers based on the current NZCV flags,
 *   Sd/Dd = condition-holds ? Sn/Dn : Sm/Dm.
 * 'cc' (insn bits [21:20]) encodes EQ, VS, GE or GT; the inverse
 * conditions are expressed in the encoding by swapping rn and rm.
 * Always returns 0 (success).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the flag values for the 64-bit movcond.  ZF only matters
         * as zero/nonzero so it is zero-extended; NF and VF are tested
         * via their sign bit and so must be sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: select on Z first, then overwrite the result
             * with frm again if N != V.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds, as in the dp case above. */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2714
40cfacdd
WN
2715static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2716 uint32_t rm, uint32_t dp)
2717{
2718 uint32_t vmin = extract32(insn, 6, 1);
2719 TCGv_ptr fpst = get_fpstatus_ptr(0);
2720
2721 if (dp) {
2722 TCGv_i64 frn, frm, dest;
2723
2724 frn = tcg_temp_new_i64();
2725 frm = tcg_temp_new_i64();
2726 dest = tcg_temp_new_i64();
2727
2728 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2729 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2730 if (vmin) {
f71a2ae5 2731 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2732 } else {
f71a2ae5 2733 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2734 }
2735 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2736 tcg_temp_free_i64(frn);
2737 tcg_temp_free_i64(frm);
2738 tcg_temp_free_i64(dest);
2739 } else {
2740 TCGv_i32 frn, frm, dest;
2741
2742 frn = tcg_temp_new_i32();
2743 frm = tcg_temp_new_i32();
2744 dest = tcg_temp_new_i32();
2745
2746 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2747 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2748 if (vmin) {
f71a2ae5 2749 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2750 } else {
f71a2ae5 2751 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2752 }
2753 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2754 tcg_temp_free_i32(frn);
2755 tcg_temp_free_i32(frm);
2756 tcg_temp_free_i32(dest);
2757 }
2758
2759 tcg_temp_free_ptr(fpst);
2760 return 0;
2761}
2762
7655f39b
WN
2763static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2764 int rounding)
2765{
2766 TCGv_ptr fpst = get_fpstatus_ptr(0);
2767 TCGv_i32 tcg_rmode;
2768
2769 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2770 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2771
2772 if (dp) {
2773 TCGv_i64 tcg_op;
2774 TCGv_i64 tcg_res;
2775 tcg_op = tcg_temp_new_i64();
2776 tcg_res = tcg_temp_new_i64();
2777 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2778 gen_helper_rintd(tcg_res, tcg_op, fpst);
2779 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2780 tcg_temp_free_i64(tcg_op);
2781 tcg_temp_free_i64(tcg_res);
2782 } else {
2783 TCGv_i32 tcg_op;
2784 TCGv_i32 tcg_res;
2785 tcg_op = tcg_temp_new_i32();
2786 tcg_res = tcg_temp_new_i32();
2787 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2788 gen_helper_rints(tcg_res, tcg_op, fpst);
2789 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2790 tcg_temp_free_i32(tcg_op);
2791 tcg_temp_free_i32(tcg_res);
2792 }
2793
2794 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2795 tcg_temp_free_i32(tcg_rmode);
2796
2797 tcg_temp_free_ptr(fpst);
2798 return 0;
2799}
2800
/* Handle the v8 VCVT{A,N,P,M} instructions: convert a VFP register to a
 * 32-bit integer using the explicitly encoded rounding mode rather than
 * the FPSCR one.  Insn bit 7 selects a signed (1) vs unsigned (0) result.
 * Always returns 0 (success).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Fixed-point shift of zero, i.e. a plain float-to-int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily install the requested rounding mode; set_rmode hands
     * back the previous mode so the second call below can restore it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The conversion helper produces a 64-bit result; only the low
         * 32 bits are stored to the destination S register.
         */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2858
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,    /* RM field 0b00: 'A' (ties away from zero) */
    FPROUNDING_TIEEVEN,    /* RM field 0b01: 'N' (ties to even) */
    FPROUNDING_POSINF,     /* RM field 0b10: 'P' (towards +inf) */
    FPROUNDING_NEGINF,     /* RM field 0b11: 'M' (towards -inf) */
};
2869
04731fb5
WN
2870static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2871{
2872 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2873
2874 if (!arm_feature(env, ARM_FEATURE_V8)) {
2875 return 1;
2876 }
2877
2878 if (dp) {
2879 VFP_DREG_D(rd, insn);
2880 VFP_DREG_N(rn, insn);
2881 VFP_DREG_M(rm, insn);
2882 } else {
2883 rd = VFP_SREG_D(insn);
2884 rn = VFP_SREG_N(insn);
2885 rm = VFP_SREG_M(insn);
2886 }
2887
2888 if ((insn & 0x0f800e50) == 0x0e000a00) {
2889 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
2890 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2891 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
2892 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2893 /* VRINTA, VRINTN, VRINTP, VRINTM */
2894 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2895 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
2896 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2897 /* VCVTA, VCVTN, VCVTP, VCVTM */
2898 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2899 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
2900 }
2901 return 1;
2902}
2903
a1c7273b 2904/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2905 (ie. an undefined instruction). */
0ecb72a5 2906static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2907{
2908 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2909 int dp, veclen;
39d5492a
PM
2910 TCGv_i32 addr;
2911 TCGv_i32 tmp;
2912 TCGv_i32 tmp2;
b7bcbe95 2913
40f137e1
PB
2914 if (!arm_feature(env, ARM_FEATURE_VFP))
2915 return 1;
2916
5df8bac1 2917 if (!s->vfp_enabled) {
9ee6e8bb 2918 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2919 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2920 return 1;
2921 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2922 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2923 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2924 return 1;
2925 }
6a57f3eb
WN
2926
2927 if (extract32(insn, 28, 4) == 0xf) {
2928 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2929 * only used in v8 and above.
2930 */
04731fb5 2931 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2932 }
2933
b7bcbe95
FB
2934 dp = ((insn & 0xf00) == 0xb00);
2935 switch ((insn >> 24) & 0xf) {
2936 case 0xe:
2937 if (insn & (1 << 4)) {
2938 /* single register transfer */
b7bcbe95
FB
2939 rd = (insn >> 12) & 0xf;
2940 if (dp) {
9ee6e8bb
PB
2941 int size;
2942 int pass;
2943
2944 VFP_DREG_N(rn, insn);
2945 if (insn & 0xf)
b7bcbe95 2946 return 1;
9ee6e8bb
PB
2947 if (insn & 0x00c00060
2948 && !arm_feature(env, ARM_FEATURE_NEON))
2949 return 1;
2950
2951 pass = (insn >> 21) & 1;
2952 if (insn & (1 << 22)) {
2953 size = 0;
2954 offset = ((insn >> 5) & 3) * 8;
2955 } else if (insn & (1 << 5)) {
2956 size = 1;
2957 offset = (insn & (1 << 6)) ? 16 : 0;
2958 } else {
2959 size = 2;
2960 offset = 0;
2961 }
18c9b560 2962 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2963 /* vfp->arm */
ad69471c 2964 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2965 switch (size) {
2966 case 0:
9ee6e8bb 2967 if (offset)
ad69471c 2968 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2969 if (insn & (1 << 23))
ad69471c 2970 gen_uxtb(tmp);
9ee6e8bb 2971 else
ad69471c 2972 gen_sxtb(tmp);
9ee6e8bb
PB
2973 break;
2974 case 1:
9ee6e8bb
PB
2975 if (insn & (1 << 23)) {
2976 if (offset) {
ad69471c 2977 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2978 } else {
ad69471c 2979 gen_uxth(tmp);
9ee6e8bb
PB
2980 }
2981 } else {
2982 if (offset) {
ad69471c 2983 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2984 } else {
ad69471c 2985 gen_sxth(tmp);
9ee6e8bb
PB
2986 }
2987 }
2988 break;
2989 case 2:
9ee6e8bb
PB
2990 break;
2991 }
ad69471c 2992 store_reg(s, rd, tmp);
b7bcbe95
FB
2993 } else {
2994 /* arm->vfp */
ad69471c 2995 tmp = load_reg(s, rd);
9ee6e8bb
PB
2996 if (insn & (1 << 23)) {
2997 /* VDUP */
2998 if (size == 0) {
ad69471c 2999 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3000 } else if (size == 1) {
ad69471c 3001 gen_neon_dup_low16(tmp);
9ee6e8bb 3002 }
cbbccffc 3003 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3004 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3005 tcg_gen_mov_i32(tmp2, tmp);
3006 neon_store_reg(rn, n, tmp2);
3007 }
3008 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3009 } else {
3010 /* VMOV */
3011 switch (size) {
3012 case 0:
ad69471c 3013 tmp2 = neon_load_reg(rn, pass);
d593c48e 3014 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3015 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3016 break;
3017 case 1:
ad69471c 3018 tmp2 = neon_load_reg(rn, pass);
d593c48e 3019 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3020 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3021 break;
3022 case 2:
9ee6e8bb
PB
3023 break;
3024 }
ad69471c 3025 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3026 }
b7bcbe95 3027 }
9ee6e8bb
PB
3028 } else { /* !dp */
3029 if ((insn & 0x6f) != 0x00)
3030 return 1;
3031 rn = VFP_SREG_N(insn);
18c9b560 3032 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3033 /* vfp->arm */
3034 if (insn & (1 << 21)) {
3035 /* system register */
40f137e1 3036 rn >>= 1;
9ee6e8bb 3037
b7bcbe95 3038 switch (rn) {
40f137e1 3039 case ARM_VFP_FPSID:
4373f3ce 3040 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3041 VFP3 restricts all id registers to privileged
3042 accesses. */
3043 if (IS_USER(s)
3044 && arm_feature(env, ARM_FEATURE_VFP3))
3045 return 1;
4373f3ce 3046 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3047 break;
40f137e1 3048 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3049 if (IS_USER(s))
3050 return 1;
4373f3ce 3051 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3052 break;
40f137e1
PB
3053 case ARM_VFP_FPINST:
3054 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3055 /* Not present in VFP3. */
3056 if (IS_USER(s)
3057 || arm_feature(env, ARM_FEATURE_VFP3))
3058 return 1;
4373f3ce 3059 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3060 break;
40f137e1 3061 case ARM_VFP_FPSCR:
601d70b9 3062 if (rd == 15) {
4373f3ce
PB
3063 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3064 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3065 } else {
7d1b0095 3066 tmp = tcg_temp_new_i32();
4373f3ce
PB
3067 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3068 }
b7bcbe95 3069 break;
9ee6e8bb
PB
3070 case ARM_VFP_MVFR0:
3071 case ARM_VFP_MVFR1:
3072 if (IS_USER(s)
06ed5d66 3073 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3074 return 1;
4373f3ce 3075 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3076 break;
b7bcbe95
FB
3077 default:
3078 return 1;
3079 }
3080 } else {
3081 gen_mov_F0_vreg(0, rn);
4373f3ce 3082 tmp = gen_vfp_mrs();
b7bcbe95
FB
3083 }
3084 if (rd == 15) {
b5ff1b31 3085 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3086 gen_set_nzcv(tmp);
7d1b0095 3087 tcg_temp_free_i32(tmp);
4373f3ce
PB
3088 } else {
3089 store_reg(s, rd, tmp);
3090 }
b7bcbe95
FB
3091 } else {
3092 /* arm->vfp */
b7bcbe95 3093 if (insn & (1 << 21)) {
40f137e1 3094 rn >>= 1;
b7bcbe95
FB
3095 /* system register */
3096 switch (rn) {
40f137e1 3097 case ARM_VFP_FPSID:
9ee6e8bb
PB
3098 case ARM_VFP_MVFR0:
3099 case ARM_VFP_MVFR1:
b7bcbe95
FB
3100 /* Writes are ignored. */
3101 break;
40f137e1 3102 case ARM_VFP_FPSCR:
e4c1cfa5 3103 tmp = load_reg(s, rd);
4373f3ce 3104 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3105 tcg_temp_free_i32(tmp);
b5ff1b31 3106 gen_lookup_tb(s);
b7bcbe95 3107 break;
40f137e1 3108 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3109 if (IS_USER(s))
3110 return 1;
71b3c3de
JR
3111 /* TODO: VFP subarchitecture support.
3112 * For now, keep the EN bit only */
e4c1cfa5 3113 tmp = load_reg(s, rd);
71b3c3de 3114 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3115 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3116 gen_lookup_tb(s);
3117 break;
3118 case ARM_VFP_FPINST:
3119 case ARM_VFP_FPINST2:
e4c1cfa5 3120 tmp = load_reg(s, rd);
4373f3ce 3121 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3122 break;
b7bcbe95
FB
3123 default:
3124 return 1;
3125 }
3126 } else {
e4c1cfa5 3127 tmp = load_reg(s, rd);
4373f3ce 3128 gen_vfp_msr(tmp);
b7bcbe95
FB
3129 gen_mov_vreg_F0(0, rn);
3130 }
3131 }
3132 }
3133 } else {
3134 /* data processing */
3135 /* The opcode is in bits 23, 21, 20 and 6. */
3136 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3137 if (dp) {
3138 if (op == 15) {
3139 /* rn is opcode */
3140 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3141 } else {
3142 /* rn is register number */
9ee6e8bb 3143 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3144 }
3145
239c20c7
WN
3146 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3147 ((rn & 0x1e) == 0x6))) {
3148 /* Integer or single/half precision destination. */
9ee6e8bb 3149 rd = VFP_SREG_D(insn);
b7bcbe95 3150 } else {
9ee6e8bb 3151 VFP_DREG_D(rd, insn);
b7bcbe95 3152 }
04595bf6 3153 if (op == 15 &&
239c20c7
WN
3154 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3155 ((rn & 0x1e) == 0x4))) {
3156 /* VCVT from int or half precision is always from S reg
3157 * regardless of dp bit. VCVT with immediate frac_bits
3158 * has same format as SREG_M.
04595bf6
PM
3159 */
3160 rm = VFP_SREG_M(insn);
b7bcbe95 3161 } else {
9ee6e8bb 3162 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3163 }
3164 } else {
9ee6e8bb 3165 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3166 if (op == 15 && rn == 15) {
3167 /* Double precision destination. */
9ee6e8bb
PB
3168 VFP_DREG_D(rd, insn);
3169 } else {
3170 rd = VFP_SREG_D(insn);
3171 }
04595bf6
PM
3172 /* NB that we implicitly rely on the encoding for the frac_bits
3173 * in VCVT of fixed to float being the same as that of an SREG_M
3174 */
9ee6e8bb 3175 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3176 }
3177
69d1fc22 3178 veclen = s->vec_len;
b7bcbe95
FB
3179 if (op == 15 && rn > 3)
3180 veclen = 0;
3181
3182 /* Shut up compiler warnings. */
3183 delta_m = 0;
3184 delta_d = 0;
3185 bank_mask = 0;
3b46e624 3186
b7bcbe95
FB
3187 if (veclen > 0) {
3188 if (dp)
3189 bank_mask = 0xc;
3190 else
3191 bank_mask = 0x18;
3192
3193 /* Figure out what type of vector operation this is. */
3194 if ((rd & bank_mask) == 0) {
3195 /* scalar */
3196 veclen = 0;
3197 } else {
3198 if (dp)
69d1fc22 3199 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3200 else
69d1fc22 3201 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3202
3203 if ((rm & bank_mask) == 0) {
3204 /* mixed scalar/vector */
3205 delta_m = 0;
3206 } else {
3207 /* vector */
3208 delta_m = delta_d;
3209 }
3210 }
3211 }
3212
3213 /* Load the initial operands. */
3214 if (op == 15) {
3215 switch (rn) {
3216 case 16:
3217 case 17:
3218 /* Integer source */
3219 gen_mov_F0_vreg(0, rm);
3220 break;
3221 case 8:
3222 case 9:
3223 /* Compare */
3224 gen_mov_F0_vreg(dp, rd);
3225 gen_mov_F1_vreg(dp, rm);
3226 break;
3227 case 10:
3228 case 11:
3229 /* Compare with zero */
3230 gen_mov_F0_vreg(dp, rd);
3231 gen_vfp_F1_ld0(dp);
3232 break;
9ee6e8bb
PB
3233 case 20:
3234 case 21:
3235 case 22:
3236 case 23:
644ad806
PB
3237 case 28:
3238 case 29:
3239 case 30:
3240 case 31:
9ee6e8bb
PB
3241 /* Source and destination the same. */
3242 gen_mov_F0_vreg(dp, rd);
3243 break;
6e0c0ed1
PM
3244 case 4:
3245 case 5:
3246 case 6:
3247 case 7:
239c20c7
WN
3248 /* VCVTB, VCVTT: only present with the halfprec extension
3249 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3250 * (we choose to UNDEF)
6e0c0ed1 3251 */
239c20c7
WN
3252 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3253 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3254 return 1;
3255 }
239c20c7
WN
3256 if (!extract32(rn, 1, 1)) {
3257 /* Half precision source. */
3258 gen_mov_F0_vreg(0, rm);
3259 break;
3260 }
6e0c0ed1 3261 /* Otherwise fall through */
b7bcbe95
FB
3262 default:
3263 /* One source operand. */
3264 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3265 break;
b7bcbe95
FB
3266 }
3267 } else {
3268 /* Two source operands. */
3269 gen_mov_F0_vreg(dp, rn);
3270 gen_mov_F1_vreg(dp, rm);
3271 }
3272
3273 for (;;) {
3274 /* Perform the calculation. */
3275 switch (op) {
605a6aed
PM
3276 case 0: /* VMLA: fd + (fn * fm) */
3277 /* Note that order of inputs to the add matters for NaNs */
3278 gen_vfp_F1_mul(dp);
3279 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3280 gen_vfp_add(dp);
3281 break;
605a6aed 3282 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3283 gen_vfp_mul(dp);
605a6aed
PM
3284 gen_vfp_F1_neg(dp);
3285 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3286 gen_vfp_add(dp);
3287 break;
605a6aed
PM
3288 case 2: /* VNMLS: -fd + (fn * fm) */
3289 /* Note that it isn't valid to replace (-A + B) with (B - A)
3290 * or similar plausible looking simplifications
3291 * because this will give wrong results for NaNs.
3292 */
3293 gen_vfp_F1_mul(dp);
3294 gen_mov_F0_vreg(dp, rd);
3295 gen_vfp_neg(dp);
3296 gen_vfp_add(dp);
b7bcbe95 3297 break;
605a6aed 3298 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3299 gen_vfp_mul(dp);
605a6aed
PM
3300 gen_vfp_F1_neg(dp);
3301 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3302 gen_vfp_neg(dp);
605a6aed 3303 gen_vfp_add(dp);
b7bcbe95
FB
3304 break;
3305 case 4: /* mul: fn * fm */
3306 gen_vfp_mul(dp);
3307 break;
3308 case 5: /* nmul: -(fn * fm) */
3309 gen_vfp_mul(dp);
3310 gen_vfp_neg(dp);
3311 break;
3312 case 6: /* add: fn + fm */
3313 gen_vfp_add(dp);
3314 break;
3315 case 7: /* sub: fn - fm */
3316 gen_vfp_sub(dp);
3317 break;
3318 case 8: /* div: fn / fm */
3319 gen_vfp_div(dp);
3320 break;
da97f52c
PM
3321 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3322 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3323 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3324 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3325 /* These are fused multiply-add, and must be done as one
3326 * floating point operation with no rounding between the
3327 * multiplication and addition steps.
3328 * NB that doing the negations here as separate steps is
3329 * correct : an input NaN should come out with its sign bit
3330 * flipped if it is a negated-input.
3331 */
3332 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3333 return 1;
3334 }
3335 if (dp) {
3336 TCGv_ptr fpst;
3337 TCGv_i64 frd;
3338 if (op & 1) {
3339 /* VFNMS, VFMS */
3340 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3341 }
3342 frd = tcg_temp_new_i64();
3343 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3344 if (op & 2) {
3345 /* VFNMA, VFNMS */
3346 gen_helper_vfp_negd(frd, frd);
3347 }
3348 fpst = get_fpstatus_ptr(0);
3349 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3350 cpu_F1d, frd, fpst);
3351 tcg_temp_free_ptr(fpst);
3352 tcg_temp_free_i64(frd);
3353 } else {
3354 TCGv_ptr fpst;
3355 TCGv_i32 frd;
3356 if (op & 1) {
3357 /* VFNMS, VFMS */
3358 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3359 }
3360 frd = tcg_temp_new_i32();
3361 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3362 if (op & 2) {
3363 gen_helper_vfp_negs(frd, frd);
3364 }
3365 fpst = get_fpstatus_ptr(0);
3366 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3367 cpu_F1s, frd, fpst);
3368 tcg_temp_free_ptr(fpst);
3369 tcg_temp_free_i32(frd);
3370 }
3371 break;
9ee6e8bb
PB
3372 case 14: /* fconst */
3373 if (!arm_feature(env, ARM_FEATURE_VFP3))
3374 return 1;
3375
3376 n = (insn << 12) & 0x80000000;
3377 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3378 if (dp) {
3379 if (i & 0x40)
3380 i |= 0x3f80;
3381 else
3382 i |= 0x4000;
3383 n |= i << 16;
4373f3ce 3384 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3385 } else {
3386 if (i & 0x40)
3387 i |= 0x780;
3388 else
3389 i |= 0x800;
3390 n |= i << 19;
5b340b51 3391 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3392 }
9ee6e8bb 3393 break;
b7bcbe95
FB
3394 case 15: /* extension space */
3395 switch (rn) {
3396 case 0: /* cpy */
3397 /* no-op */
3398 break;
3399 case 1: /* abs */
3400 gen_vfp_abs(dp);
3401 break;
3402 case 2: /* neg */
3403 gen_vfp_neg(dp);
3404 break;
3405 case 3: /* sqrt */
3406 gen_vfp_sqrt(dp);
3407 break;
239c20c7 3408 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3409 tmp = gen_vfp_mrs();
3410 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3411 if (dp) {
3412 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3413 cpu_env);
3414 } else {
3415 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3416 cpu_env);
3417 }
7d1b0095 3418 tcg_temp_free_i32(tmp);
60011498 3419 break;
239c20c7 3420 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3421 tmp = gen_vfp_mrs();
3422 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3423 if (dp) {
3424 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3425 cpu_env);
3426 } else {
3427 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3428 cpu_env);
3429 }
7d1b0095 3430 tcg_temp_free_i32(tmp);
60011498 3431 break;
239c20c7 3432 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3433 tmp = tcg_temp_new_i32();
239c20c7
WN
3434 if (dp) {
3435 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3436 cpu_env);
3437 } else {
3438 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3439 cpu_env);
3440 }
60011498
PB
3441 gen_mov_F0_vreg(0, rd);
3442 tmp2 = gen_vfp_mrs();
3443 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3444 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3445 tcg_temp_free_i32(tmp2);
60011498
PB
3446 gen_vfp_msr(tmp);
3447 break;
239c20c7 3448 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3449 tmp = tcg_temp_new_i32();
239c20c7
WN
3450 if (dp) {
3451 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3452 cpu_env);
3453 } else {
3454 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3455 cpu_env);
3456 }
60011498
PB
3457 tcg_gen_shli_i32(tmp, tmp, 16);
3458 gen_mov_F0_vreg(0, rd);
3459 tmp2 = gen_vfp_mrs();
3460 tcg_gen_ext16u_i32(tmp2, tmp2);
3461 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3462 tcg_temp_free_i32(tmp2);
60011498
PB
3463 gen_vfp_msr(tmp);
3464 break;
b7bcbe95
FB
3465 case 8: /* cmp */
3466 gen_vfp_cmp(dp);
3467 break;
3468 case 9: /* cmpe */
3469 gen_vfp_cmpe(dp);
3470 break;
3471 case 10: /* cmpz */
3472 gen_vfp_cmp(dp);
3473 break;
3474 case 11: /* cmpez */
3475 gen_vfp_F1_ld0(dp);
3476 gen_vfp_cmpe(dp);
3477 break;
664c6733
WN
3478 case 12: /* vrintr */
3479 {
3480 TCGv_ptr fpst = get_fpstatus_ptr(0);
3481 if (dp) {
3482 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3483 } else {
3484 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3485 }
3486 tcg_temp_free_ptr(fpst);
3487 break;
3488 }
a290c62a
WN
3489 case 13: /* vrintz */
3490 {
3491 TCGv_ptr fpst = get_fpstatus_ptr(0);
3492 TCGv_i32 tcg_rmode;
3493 tcg_rmode = tcg_const_i32(float_round_to_zero);
3494 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3495 if (dp) {
3496 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3497 } else {
3498 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3499 }
3500 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3501 tcg_temp_free_i32(tcg_rmode);
3502 tcg_temp_free_ptr(fpst);
3503 break;
3504 }
4e82bc01
WN
3505 case 14: /* vrintx */
3506 {
3507 TCGv_ptr fpst = get_fpstatus_ptr(0);
3508 if (dp) {
3509 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3510 } else {
3511 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3512 }
3513 tcg_temp_free_ptr(fpst);
3514 break;
3515 }
b7bcbe95
FB
3516 case 15: /* single<->double conversion */
3517 if (dp)
4373f3ce 3518 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3519 else
4373f3ce 3520 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3521 break;
3522 case 16: /* fuito */
5500b06c 3523 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3524 break;
3525 case 17: /* fsito */
5500b06c 3526 gen_vfp_sito(dp, 0);
b7bcbe95 3527 break;
9ee6e8bb
PB
3528 case 20: /* fshto */
3529 if (!arm_feature(env, ARM_FEATURE_VFP3))
3530 return 1;
5500b06c 3531 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3532 break;
3533 case 21: /* fslto */
3534 if (!arm_feature(env, ARM_FEATURE_VFP3))
3535 return 1;
5500b06c 3536 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3537 break;
3538 case 22: /* fuhto */
3539 if (!arm_feature(env, ARM_FEATURE_VFP3))
3540 return 1;
5500b06c 3541 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3542 break;
3543 case 23: /* fulto */
3544 if (!arm_feature(env, ARM_FEATURE_VFP3))
3545 return 1;
5500b06c 3546 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3547 break;
b7bcbe95 3548 case 24: /* ftoui */
5500b06c 3549 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3550 break;
3551 case 25: /* ftouiz */
5500b06c 3552 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3553 break;
3554 case 26: /* ftosi */
5500b06c 3555 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3556 break;
3557 case 27: /* ftosiz */
5500b06c 3558 gen_vfp_tosiz(dp, 0);
b7bcbe95 3559 break;
9ee6e8bb
PB
3560 case 28: /* ftosh */
3561 if (!arm_feature(env, ARM_FEATURE_VFP3))
3562 return 1;
5500b06c 3563 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3564 break;
3565 case 29: /* ftosl */
3566 if (!arm_feature(env, ARM_FEATURE_VFP3))
3567 return 1;
5500b06c 3568 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3569 break;
3570 case 30: /* ftouh */
3571 if (!arm_feature(env, ARM_FEATURE_VFP3))
3572 return 1;
5500b06c 3573 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3574 break;
3575 case 31: /* ftoul */
3576 if (!arm_feature(env, ARM_FEATURE_VFP3))
3577 return 1;
5500b06c 3578 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3579 break;
b7bcbe95 3580 default: /* undefined */
b7bcbe95
FB
3581 return 1;
3582 }
3583 break;
3584 default: /* undefined */
b7bcbe95
FB
3585 return 1;
3586 }
3587
3588 /* Write back the result. */
239c20c7
WN
3589 if (op == 15 && (rn >= 8 && rn <= 11)) {
3590 /* Comparison, do nothing. */
3591 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3592 (rn & 0x1e) == 0x6)) {
3593 /* VCVT double to int: always integer result.
3594 * VCVT double to half precision is always a single
3595 * precision result.
3596 */
b7bcbe95 3597 gen_mov_vreg_F0(0, rd);
239c20c7 3598 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3599 /* conversion */
3600 gen_mov_vreg_F0(!dp, rd);
239c20c7 3601 } else {
b7bcbe95 3602 gen_mov_vreg_F0(dp, rd);
239c20c7 3603 }
b7bcbe95
FB
3604
3605 /* break out of the loop if we have finished */
3606 if (veclen == 0)
3607 break;
3608
3609 if (op == 15 && delta_m == 0) {
3610 /* single source one-many */
3611 while (veclen--) {
3612 rd = ((rd + delta_d) & (bank_mask - 1))
3613 | (rd & bank_mask);
3614 gen_mov_vreg_F0(dp, rd);
3615 }
3616 break;
3617 }
3618 /* Setup the next operands. */
3619 veclen--;
3620 rd = ((rd + delta_d) & (bank_mask - 1))
3621 | (rd & bank_mask);
3622
3623 if (op == 15) {
3624 /* One source operand. */
3625 rm = ((rm + delta_m) & (bank_mask - 1))
3626 | (rm & bank_mask);
3627 gen_mov_F0_vreg(dp, rm);
3628 } else {
3629 /* Two source operands. */
3630 rn = ((rn + delta_d) & (bank_mask - 1))
3631 | (rn & bank_mask);
3632 gen_mov_F0_vreg(dp, rn);
3633 if (delta_m) {
3634 rm = ((rm + delta_m) & (bank_mask - 1))
3635 | (rm & bank_mask);
3636 gen_mov_F1_vreg(dp, rm);
3637 }
3638 }
3639 }
3640 }
3641 break;
3642 case 0xc:
3643 case 0xd:
8387da81 3644 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3645 /* two-register transfer */
3646 rn = (insn >> 16) & 0xf;
3647 rd = (insn >> 12) & 0xf;
3648 if (dp) {
9ee6e8bb
PB
3649 VFP_DREG_M(rm, insn);
3650 } else {
3651 rm = VFP_SREG_M(insn);
3652 }
b7bcbe95 3653
18c9b560 3654 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3655 /* vfp->arm */
3656 if (dp) {
4373f3ce
PB
3657 gen_mov_F0_vreg(0, rm * 2);
3658 tmp = gen_vfp_mrs();
3659 store_reg(s, rd, tmp);
3660 gen_mov_F0_vreg(0, rm * 2 + 1);
3661 tmp = gen_vfp_mrs();
3662 store_reg(s, rn, tmp);
b7bcbe95
FB
3663 } else {
3664 gen_mov_F0_vreg(0, rm);
4373f3ce 3665 tmp = gen_vfp_mrs();
8387da81 3666 store_reg(s, rd, tmp);
b7bcbe95 3667 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3668 tmp = gen_vfp_mrs();
8387da81 3669 store_reg(s, rn, tmp);
b7bcbe95
FB
3670 }
3671 } else {
3672 /* arm->vfp */
3673 if (dp) {
4373f3ce
PB
3674 tmp = load_reg(s, rd);
3675 gen_vfp_msr(tmp);
3676 gen_mov_vreg_F0(0, rm * 2);
3677 tmp = load_reg(s, rn);
3678 gen_vfp_msr(tmp);
3679 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3680 } else {
8387da81 3681 tmp = load_reg(s, rd);
4373f3ce 3682 gen_vfp_msr(tmp);
b7bcbe95 3683 gen_mov_vreg_F0(0, rm);
8387da81 3684 tmp = load_reg(s, rn);
4373f3ce 3685 gen_vfp_msr(tmp);
b7bcbe95
FB
3686 gen_mov_vreg_F0(0, rm + 1);
3687 }
3688 }
3689 } else {
3690 /* Load/store */
3691 rn = (insn >> 16) & 0xf;
3692 if (dp)
9ee6e8bb 3693 VFP_DREG_D(rd, insn);
b7bcbe95 3694 else
9ee6e8bb 3695 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3696 if ((insn & 0x01200000) == 0x01000000) {
3697 /* Single load/store */
3698 offset = (insn & 0xff) << 2;
3699 if ((insn & (1 << 23)) == 0)
3700 offset = -offset;
934814f1
PM
3701 if (s->thumb && rn == 15) {
3702 /* This is actually UNPREDICTABLE */
3703 addr = tcg_temp_new_i32();
3704 tcg_gen_movi_i32(addr, s->pc & ~2);
3705 } else {
3706 addr = load_reg(s, rn);
3707 }
312eea9f 3708 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3709 if (insn & (1 << 20)) {
312eea9f 3710 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3711 gen_mov_vreg_F0(dp, rd);
3712 } else {
3713 gen_mov_F0_vreg(dp, rd);
312eea9f 3714 gen_vfp_st(s, dp, addr);
b7bcbe95 3715 }
7d1b0095 3716 tcg_temp_free_i32(addr);
b7bcbe95
FB
3717 } else {
3718 /* load/store multiple */
934814f1 3719 int w = insn & (1 << 21);
b7bcbe95
FB
3720 if (dp)
3721 n = (insn >> 1) & 0x7f;
3722 else
3723 n = insn & 0xff;
3724
934814f1
PM
3725 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3726 /* P == U , W == 1 => UNDEF */
3727 return 1;
3728 }
3729 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3730 /* UNPREDICTABLE cases for bad immediates: we choose to
3731 * UNDEF to avoid generating huge numbers of TCG ops
3732 */
3733 return 1;
3734 }
3735 if (rn == 15 && w) {
3736 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3737 return 1;
3738 }
3739
3740 if (s->thumb && rn == 15) {
3741 /* This is actually UNPREDICTABLE */
3742 addr = tcg_temp_new_i32();
3743 tcg_gen_movi_i32(addr, s->pc & ~2);
3744 } else {
3745 addr = load_reg(s, rn);
3746 }
b7bcbe95 3747 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3748 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3749
3750 if (dp)
3751 offset = 8;
3752 else
3753 offset = 4;
3754 for (i = 0; i < n; i++) {
18c9b560 3755 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3756 /* load */
312eea9f 3757 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3758 gen_mov_vreg_F0(dp, rd + i);
3759 } else {
3760 /* store */
3761 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3762 gen_vfp_st(s, dp, addr);
b7bcbe95 3763 }
312eea9f 3764 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3765 }
934814f1 3766 if (w) {
b7bcbe95
FB
3767 /* writeback */
3768 if (insn & (1 << 24))
3769 offset = -offset * n;
3770 else if (dp && (insn & 1))
3771 offset = 4;
3772 else
3773 offset = 0;
3774
3775 if (offset != 0)
312eea9f
FN
3776 tcg_gen_addi_i32(addr, addr, offset);
3777 store_reg(s, rn, addr);
3778 } else {
7d1b0095 3779 tcg_temp_free_i32(addr);
b7bcbe95
FB
3780 }
3781 }
3782 }
3783 break;
3784 default:
3785 /* Should never happen. */
3786 return 1;
3787 }
3788 return 0;
3789}
3790
0a2461fa 3791static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3792{
6e256c93
FB
3793 TranslationBlock *tb;
3794
3795 tb = s->tb;
3796 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3797 tcg_gen_goto_tb(n);
eaed129d 3798 gen_set_pc_im(s, dest);
8cfd0495 3799 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3800 } else {
eaed129d 3801 gen_set_pc_im(s, dest);
57fec1fe 3802 tcg_gen_exit_tb(0);
6e256c93 3803 }
c53be334
FB
3804}
3805
8aaca4c0
FB
3806static inline void gen_jmp (DisasContext *s, uint32_t dest)
3807{
551bd27f 3808 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3809 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3810 if (s->thumb)
d9ba4830
PB
3811 dest |= 1;
3812 gen_bx_im(s, dest);
8aaca4c0 3813 } else {
6e256c93 3814 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3815 s->is_jmp = DISAS_TB_JUMP;
3816 }
3817}
3818
39d5492a 3819static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3820{
ee097184 3821 if (x)
d9ba4830 3822 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3823 else
d9ba4830 3824 gen_sxth(t0);
ee097184 3825 if (y)
d9ba4830 3826 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3827 else
d9ba4830
PB
3828 gen_sxth(t1);
3829 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3830}
3831
3832/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3833static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3834 uint32_t mask;
3835
3836 mask = 0;
3837 if (flags & (1 << 0))
3838 mask |= 0xff;
3839 if (flags & (1 << 1))
3840 mask |= 0xff00;
3841 if (flags & (1 << 2))
3842 mask |= 0xff0000;
3843 if (flags & (1 << 3))
3844 mask |= 0xff000000;
9ee6e8bb 3845
2ae23e75 3846 /* Mask out undefined bits. */
9ee6e8bb 3847 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3848 if (!arm_feature(env, ARM_FEATURE_V4T))
3849 mask &= ~CPSR_T;
3850 if (!arm_feature(env, ARM_FEATURE_V5))
3851 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3852 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3853 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3854 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3855 mask &= ~CPSR_IT;
9ee6e8bb 3856 /* Mask out execution state bits. */
2ae23e75 3857 if (!spsr)
e160c51c 3858 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3859 /* Mask out privileged bits. */
3860 if (IS_USER(s))
9ee6e8bb 3861 mask &= CPSR_USER;
b5ff1b31
FB
3862 return mask;
3863}
3864
2fbac54b 3865/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3866static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3867{
39d5492a 3868 TCGv_i32 tmp;
b5ff1b31
FB
3869 if (spsr) {
3870 /* ??? This is also undefined in system mode. */
3871 if (IS_USER(s))
3872 return 1;
d9ba4830
PB
3873
3874 tmp = load_cpu_field(spsr);
3875 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3876 tcg_gen_andi_i32(t0, t0, mask);
3877 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3878 store_cpu_field(tmp, spsr);
b5ff1b31 3879 } else {
2fbac54b 3880 gen_set_cpsr(t0, mask);
b5ff1b31 3881 }
7d1b0095 3882 tcg_temp_free_i32(t0);
b5ff1b31
FB
3883 gen_lookup_tb(s);
3884 return 0;
3885}
3886
2fbac54b
FN
3887/* Returns nonzero if access to the PSR is not permitted. */
3888static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3889{
39d5492a 3890 TCGv_i32 tmp;
7d1b0095 3891 tmp = tcg_temp_new_i32();
2fbac54b
FN
3892 tcg_gen_movi_i32(tmp, val);
3893 return gen_set_psr(s, mask, spsr, tmp);
3894}
3895
e9bb4aa9 3896/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3897static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3898{
39d5492a 3899 TCGv_i32 tmp;
e9bb4aa9 3900 store_reg(s, 15, pc);
d9ba4830
PB
3901 tmp = load_cpu_field(spsr);
3902 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3903 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3904 s->is_jmp = DISAS_UPDATE;
3905}
3906
b0109805 3907/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3908static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3909{
b0109805 3910 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3911 tcg_temp_free_i32(cpsr);
b0109805 3912 store_reg(s, 15, pc);
9ee6e8bb
PB
3913 s->is_jmp = DISAS_UPDATE;
3914}
3b46e624 3915
9ee6e8bb
PB
3916static inline void
3917gen_set_condexec (DisasContext *s)
3918{
3919 if (s->condexec_mask) {
8f01245e 3920 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3921 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3922 tcg_gen_movi_i32(tmp, val);
d9ba4830 3923 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3924 }
3925}
3b46e624 3926
bc4a0de0
PM
3927static void gen_exception_insn(DisasContext *s, int offset, int excp)
3928{
3929 gen_set_condexec(s);
eaed129d 3930 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3931 gen_exception(excp);
3932 s->is_jmp = DISAS_JUMP;
3933}
3934
9ee6e8bb
PB
3935static void gen_nop_hint(DisasContext *s, int val)
3936{
3937 switch (val) {
3938 case 3: /* wfi */
eaed129d 3939 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3940 s->is_jmp = DISAS_WFI;
3941 break;
3942 case 2: /* wfe */
72c1d3af
PM
3943 gen_set_pc_im(s, s->pc);
3944 s->is_jmp = DISAS_WFE;
3945 break;
9ee6e8bb 3946 case 4: /* sev */
12b10571
MR
3947 case 5: /* sevl */
3948 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3949 default: /* nop */
3950 break;
3951 }
3952}
99c475ab 3953
ad69471c 3954#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3955
39d5492a 3956static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3957{
3958 switch (size) {
dd8fbd78
FN
3959 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3960 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3961 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3962 default: abort();
9ee6e8bb 3963 }
9ee6e8bb
PB
3964}
3965
39d5492a 3966static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3967{
3968 switch (size) {
dd8fbd78
FN
3969 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3970 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3971 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3972 default: return;
3973 }
3974}
3975
3976/* 32-bit pairwise ops end up the same as the elementwise versions. */
3977#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3978#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3979#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3980#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3981
ad69471c
PB
3982#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3983 switch ((size << 1) | u) { \
3984 case 0: \
dd8fbd78 3985 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3986 break; \
3987 case 1: \
dd8fbd78 3988 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3989 break; \
3990 case 2: \
dd8fbd78 3991 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3992 break; \
3993 case 3: \
dd8fbd78 3994 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3995 break; \
3996 case 4: \
dd8fbd78 3997 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3998 break; \
3999 case 5: \
dd8fbd78 4000 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
4001 break; \
4002 default: return 1; \
4003 }} while (0)
9ee6e8bb
PB
4004
4005#define GEN_NEON_INTEGER_OP(name) do { \
4006 switch ((size << 1) | u) { \
ad69471c 4007 case 0: \
dd8fbd78 4008 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
4009 break; \
4010 case 1: \
dd8fbd78 4011 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
4012 break; \
4013 case 2: \
dd8fbd78 4014 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
4015 break; \
4016 case 3: \
dd8fbd78 4017 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
4018 break; \
4019 case 4: \
dd8fbd78 4020 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
4021 break; \
4022 case 5: \
dd8fbd78 4023 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 4024 break; \
9ee6e8bb
PB
4025 default: return 1; \
4026 }} while (0)
4027
39d5492a 4028static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4029{
39d5492a 4030 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4031 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4032 return tmp;
9ee6e8bb
PB
4033}
4034
39d5492a 4035static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4036{
dd8fbd78 4037 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4038 tcg_temp_free_i32(var);
9ee6e8bb
PB
4039}
4040
39d5492a 4041static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4042{
39d5492a 4043 TCGv_i32 tmp;
9ee6e8bb 4044 if (size == 1) {
0fad6efc
PM
4045 tmp = neon_load_reg(reg & 7, reg >> 4);
4046 if (reg & 8) {
dd8fbd78 4047 gen_neon_dup_high16(tmp);
0fad6efc
PM
4048 } else {
4049 gen_neon_dup_low16(tmp);
dd8fbd78 4050 }
0fad6efc
PM
4051 } else {
4052 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4053 }
dd8fbd78 4054 return tmp;
9ee6e8bb
PB
4055}
4056
02acedf9 4057static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4058{
39d5492a 4059 TCGv_i32 tmp, tmp2;
600b828c 4060 if (!q && size == 2) {
02acedf9
PM
4061 return 1;
4062 }
4063 tmp = tcg_const_i32(rd);
4064 tmp2 = tcg_const_i32(rm);
4065 if (q) {
4066 switch (size) {
4067 case 0:
02da0b2d 4068 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4069 break;
4070 case 1:
02da0b2d 4071 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4072 break;
4073 case 2:
02da0b2d 4074 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4075 break;
4076 default:
4077 abort();
4078 }
4079 } else {
4080 switch (size) {
4081 case 0:
02da0b2d 4082 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4083 break;
4084 case 1:
02da0b2d 4085 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4086 break;
4087 default:
4088 abort();
4089 }
4090 }
4091 tcg_temp_free_i32(tmp);
4092 tcg_temp_free_i32(tmp2);
4093 return 0;
19457615
FN
4094}
4095
d68a6f3a 4096static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4097{
39d5492a 4098 TCGv_i32 tmp, tmp2;
600b828c 4099 if (!q && size == 2) {
d68a6f3a
PM
4100 return 1;
4101 }
4102 tmp = tcg_const_i32(rd);
4103 tmp2 = tcg_const_i32(rm);
4104 if (q) {
4105 switch (size) {
4106 case 0:
02da0b2d 4107 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4108 break;
4109 case 1:
02da0b2d 4110 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4111 break;
4112 case 2:
02da0b2d 4113 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4114 break;
4115 default:
4116 abort();
4117 }
4118 } else {
4119 switch (size) {
4120 case 0:
02da0b2d 4121 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4122 break;
4123 case 1:
02da0b2d 4124 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4125 break;
4126 default:
4127 abort();
4128 }
4129 }
4130 tcg_temp_free_i32(tmp);
4131 tcg_temp_free_i32(tmp2);
4132 return 0;
19457615
FN
4133}
4134
/* Element transpose on a pair of 32-bit words viewed as 4x u8 lanes
 * (byte 0 = least significant).  On exit:
 *   t0' = { t1.b0, t0.b0, t1.b2, t0.b2 }
 *   t1' = { t1.b1, t0.b1, t1.b3, t0.b3 }
 * NOTE(review): lane placement is swapped relative to a literal VTRN
 * reading; the 2-reg-misc caller appears to account for this — confirm
 * against the VTRN decode.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = even bytes of t0 shifted into odd lanes | even bytes of t1 */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = odd bytes of t1 shifted into even lanes | odd bytes of t0.
     * t0 is still unmodified here, so this reads the original value.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4156
/* Element transpose on a pair of 32-bit words viewed as 2x u16 lanes
 * (halfword 0 = least significant).  On exit:
 *   t0' = { t1.h0, t0.h0 }
 *   t1' = { t1.h1, t0.h1 }
 * See the NOTE on gen_neon_trn_u8 about lane placement.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = low half of t0 shifted high | low half of t1 */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = high half of t1 shifted low | high half of (original) t0 */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4175
4176
/* Properties of the NEON "load/store multiple structures" forms,
 * indexed by the instruction's "type" field (values 0..10):
 *   nregs      - number of D registers transferred
 *   interleave - element interleave factor between registers
 *   spacing    - spacing between consecutive destination D registers
 * The table is only ever read (see disas_neon_ls_insn), so it is
 * declared const to place it in read-only storage and prevent
 * accidental modification.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4194
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid (UNDEF).  Handles three encodings:
   - "load/store multiple structures" (bit 23 clear),
   - "load single element to all lanes" (bit 23 set, size field == 3),
   - "load/store single element to one lane" (bit 23 set, size != 3).
   Also performs the optional base-register writeback (rm != 15). */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base address with a
             * per-register offset rather than continuing sequentially.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one load/store per D register. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: assemble/split each 32-bit pass. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses per 32-bit pass. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit accesses per 32-bit pass. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 none, rm == 13 post-index by
     * the transfer size, otherwise post-index by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4513
8f8e3aa4 4514/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4515static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4516{
4517 tcg_gen_and_i32(t, t, c);
f669df27 4518 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4519 tcg_gen_or_i32(dest, t, f);
4520}
4521
39d5492a 4522static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4523{
4524 switch (size) {
4525 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4526 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4527 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4528 default: abort();
4529 }
4530}
4531
39d5492a 4532static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4533{
4534 switch (size) {
02da0b2d
PM
4535 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4536 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4537 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4538 default: abort();
4539 }
4540}
4541
39d5492a 4542static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4543{
4544 switch (size) {
02da0b2d
PM
4545 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4546 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4547 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4548 default: abort();
4549 }
4550}
4551
39d5492a 4552static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4553{
4554 switch (size) {
02da0b2d
PM
4555 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4556 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4557 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4558 default: abort();
4559 }
4560}
4561
39d5492a 4562static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4563 int q, int u)
4564{
4565 if (q) {
4566 if (u) {
4567 switch (size) {
4568 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4569 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4570 default: abort();
4571 }
4572 } else {
4573 switch (size) {
4574 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4575 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4576 default: abort();
4577 }
4578 }
4579 } else {
4580 if (u) {
4581 switch (size) {
b408a9b0
CL
4582 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4583 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4584 default: abort();
4585 }
4586 } else {
4587 switch (size) {
4588 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4589 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4590 default: abort();
4591 }
4592 }
4593 }
4594}
4595
39d5492a 4596static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4597{
4598 if (u) {
4599 switch (size) {
4600 case 0: gen_helper_neon_widen_u8(dest, src); break;
4601 case 1: gen_helper_neon_widen_u16(dest, src); break;
4602 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4603 default: abort();
4604 }
4605 } else {
4606 switch (size) {
4607 case 0: gen_helper_neon_widen_s8(dest, src); break;
4608 case 1: gen_helper_neon_widen_s16(dest, src); break;
4609 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4610 default: abort();
4611 }
4612 }
7d1b0095 4613 tcg_temp_free_i32(src);
ad69471c
PB
4614}
4615
4616static inline void gen_neon_addl(int size)
4617{
4618 switch (size) {
4619 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4620 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4621 case 2: tcg_gen_add_i64(CPU_V001); break;
4622 default: abort();
4623 }
4624}
4625
4626static inline void gen_neon_subl(int size)
4627{
4628 switch (size) {
4629 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4630 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4631 case 2: tcg_gen_sub_i64(CPU_V001); break;
4632 default: abort();
4633 }
4634}
4635
a7812ae4 4636static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4637{
4638 switch (size) {
4639 case 0: gen_helper_neon_negl_u16(var, var); break;
4640 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4641 case 2:
4642 tcg_gen_neg_i64(var, var);
4643 break;
ad69471c
PB
4644 default: abort();
4645 }
4646}
4647
a7812ae4 4648static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4649{
4650 switch (size) {
02da0b2d
PM
4651 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4652 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4653 default: abort();
4654 }
4655}
4656
39d5492a
PM
4657static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4658 int size, int u)
ad69471c 4659{
a7812ae4 4660 TCGv_i64 tmp;
ad69471c
PB
4661
4662 switch ((size << 1) | u) {
4663 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4664 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4665 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4666 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4667 case 4:
4668 tmp = gen_muls_i64_i32(a, b);
4669 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4670 tcg_temp_free_i64(tmp);
ad69471c
PB
4671 break;
4672 case 5:
4673 tmp = gen_mulu_i64_i32(a, b);
4674 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4675 tcg_temp_free_i64(tmp);
ad69471c
PB
4676 break;
4677 default: abort();
4678 }
c6067f04
CL
4679
4680 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4681 Don't forget to clean them now. */
4682 if (size < 2) {
7d1b0095
PM
4683 tcg_temp_free_i32(a);
4684 tcg_temp_free_i32(b);
c6067f04 4685 }
ad69471c
PB
4686}
4687
39d5492a
PM
4688static void gen_neon_narrow_op(int op, int u, int size,
4689 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4690{
4691 if (op) {
4692 if (u) {
4693 gen_neon_unarrow_sats(size, dest, src);
4694 } else {
4695 gen_neon_narrow(size, dest, src);
4696 }
4697 } else {
4698 if (u) {
4699 gen_neon_narrow_satu(size, dest, src);
4700 } else {
4701 gen_neon_narrow_sats(size, dest, src);
4702 }
4703 }
4704}
4705
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
/* op value 24 is unallocated (no bit set in neon_3r_sizes => UNDEF) */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
/* Valid element sizes per 3-reg-same op: bit n set means size value n
 * is allowed (0x7 = sizes 0..2, 0xf additionally allows the 64-bit
 * size 3 encoding).  Entries marked "size field encodes op" accept
 * all size values because the field selects a sub-operation instead.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4775
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings
 * (their neon_2rm_sizes[] entry is zero, so they UNDEF).
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4839#define NEON_2RM_VCVT_UF 63
4840
4841static int neon_2rm_is_float_op(int op)
4842{
4843 /* Return true if this neon 2reg-misc op is float-to-float */
4844 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4845 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4846 op == NEON_2RM_VRINTM ||
4847 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4848 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4849}
4850
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x7 = integer sizes 0..2, 0x4 = 32-bit/float only,
 * 0x1 = byte-size only.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4917
9ee6e8bb
PB
4918/* Translate a NEON data processing instruction. Return nonzero if the
4919 instruction is invalid.
ad69471c
PB
4920 We process data in a mixture of 32-bit and 64-bit chunks.
4921 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4922
0ecb72a5 4923static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4924{
4925 int op;
4926 int q;
4927 int rd, rn, rm;
4928 int size;
4929 int shift;
4930 int pass;
4931 int count;
4932 int pairwise;
4933 int u;
ca9a32e4 4934 uint32_t imm, mask;
39d5492a 4935 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4936 TCGv_i64 tmp64;
9ee6e8bb 4937
5df8bac1 4938 if (!s->vfp_enabled)
9ee6e8bb
PB
4939 return 1;
4940 q = (insn & (1 << 6)) != 0;
4941 u = (insn >> 24) & 1;
4942 VFP_DREG_D(rd, insn);
4943 VFP_DREG_N(rn, insn);
4944 VFP_DREG_M(rm, insn);
4945 size = (insn >> 20) & 3;
4946 if ((insn & (1 << 23)) == 0) {
4947 /* Three register same length. */
4948 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4949 /* Catch invalid op and bad size combinations: UNDEF */
4950 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4951 return 1;
4952 }
25f84f79
PM
4953 /* All insns of this form UNDEF for either this condition or the
4954 * superset of cases "Q==1"; we catch the latter later.
4955 */
4956 if (q && ((rd | rn | rm) & 1)) {
4957 return 1;
4958 }
62698be3
PM
4959 if (size == 3 && op != NEON_3R_LOGIC) {
4960 /* 64-bit element instructions. */
9ee6e8bb 4961 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4962 neon_load_reg64(cpu_V0, rn + pass);
4963 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4964 switch (op) {
62698be3 4965 case NEON_3R_VQADD:
9ee6e8bb 4966 if (u) {
02da0b2d
PM
4967 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4968 cpu_V0, cpu_V1);
2c0262af 4969 } else {
02da0b2d
PM
4970 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4971 cpu_V0, cpu_V1);
2c0262af 4972 }
9ee6e8bb 4973 break;
62698be3 4974 case NEON_3R_VQSUB:
9ee6e8bb 4975 if (u) {
02da0b2d
PM
4976 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4977 cpu_V0, cpu_V1);
ad69471c 4978 } else {
02da0b2d
PM
4979 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4980 cpu_V0, cpu_V1);
ad69471c
PB
4981 }
4982 break;
62698be3 4983 case NEON_3R_VSHL:
ad69471c
PB
4984 if (u) {
4985 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4986 } else {
4987 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4988 }
4989 break;
62698be3 4990 case NEON_3R_VQSHL:
ad69471c 4991 if (u) {
02da0b2d
PM
4992 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4993 cpu_V1, cpu_V0);
ad69471c 4994 } else {
02da0b2d
PM
4995 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4996 cpu_V1, cpu_V0);
ad69471c
PB
4997 }
4998 break;
62698be3 4999 case NEON_3R_VRSHL:
ad69471c
PB
5000 if (u) {
5001 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5002 } else {
ad69471c
PB
5003 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5004 }
5005 break;
62698be3 5006 case NEON_3R_VQRSHL:
ad69471c 5007 if (u) {
02da0b2d
PM
5008 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5009 cpu_V1, cpu_V0);
ad69471c 5010 } else {
02da0b2d
PM
5011 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5012 cpu_V1, cpu_V0);
1e8d4eec 5013 }
9ee6e8bb 5014 break;
62698be3 5015 case NEON_3R_VADD_VSUB:
9ee6e8bb 5016 if (u) {
ad69471c 5017 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5018 } else {
ad69471c 5019 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5020 }
5021 break;
5022 default:
5023 abort();
2c0262af 5024 }
ad69471c 5025 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5026 }
9ee6e8bb 5027 return 0;
2c0262af 5028 }
25f84f79 5029 pairwise = 0;
9ee6e8bb 5030 switch (op) {
62698be3
PM
5031 case NEON_3R_VSHL:
5032 case NEON_3R_VQSHL:
5033 case NEON_3R_VRSHL:
5034 case NEON_3R_VQRSHL:
9ee6e8bb 5035 {
ad69471c
PB
5036 int rtmp;
5037 /* Shift instruction operands are reversed. */
5038 rtmp = rn;
9ee6e8bb 5039 rn = rm;
ad69471c 5040 rm = rtmp;
9ee6e8bb 5041 }
2c0262af 5042 break;
25f84f79
PM
5043 case NEON_3R_VPADD:
5044 if (u) {
5045 return 1;
5046 }
5047 /* Fall through */
62698be3
PM
5048 case NEON_3R_VPMAX:
5049 case NEON_3R_VPMIN:
9ee6e8bb 5050 pairwise = 1;
2c0262af 5051 break;
25f84f79
PM
5052 case NEON_3R_FLOAT_ARITH:
5053 pairwise = (u && size < 2); /* if VPADD (float) */
5054 break;
5055 case NEON_3R_FLOAT_MINMAX:
5056 pairwise = u; /* if VPMIN/VPMAX (float) */
5057 break;
5058 case NEON_3R_FLOAT_CMP:
5059 if (!u && size) {
5060 /* no encoding for U=0 C=1x */
5061 return 1;
5062 }
5063 break;
5064 case NEON_3R_FLOAT_ACMP:
5065 if (!u) {
5066 return 1;
5067 }
5068 break;
505935fc
WN
5069 case NEON_3R_FLOAT_MISC:
5070 /* VMAXNM/VMINNM in ARMv8 */
5071 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5072 return 1;
5073 }
2c0262af 5074 break;
25f84f79
PM
5075 case NEON_3R_VMUL:
5076 if (u && (size != 0)) {
5077 /* UNDEF on invalid size for polynomial subcase */
5078 return 1;
5079 }
2c0262af 5080 break;
da97f52c
PM
5081 case NEON_3R_VFM:
5082 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5083 return 1;
5084 }
5085 break;
9ee6e8bb 5086 default:
2c0262af 5087 break;
9ee6e8bb 5088 }
dd8fbd78 5089
25f84f79
PM
5090 if (pairwise && q) {
5091 /* All the pairwise insns UNDEF if Q is set */
5092 return 1;
5093 }
5094
9ee6e8bb
PB
5095 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5096
5097 if (pairwise) {
5098 /* Pairwise. */
a5a14945
JR
5099 if (pass < 1) {
5100 tmp = neon_load_reg(rn, 0);
5101 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5102 } else {
a5a14945
JR
5103 tmp = neon_load_reg(rm, 0);
5104 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5105 }
5106 } else {
5107 /* Elementwise. */
dd8fbd78
FN
5108 tmp = neon_load_reg(rn, pass);
5109 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5110 }
5111 switch (op) {
62698be3 5112 case NEON_3R_VHADD:
9ee6e8bb
PB
5113 GEN_NEON_INTEGER_OP(hadd);
5114 break;
62698be3 5115 case NEON_3R_VQADD:
02da0b2d 5116 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5117 break;
62698be3 5118 case NEON_3R_VRHADD:
9ee6e8bb 5119 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5120 break;
62698be3 5121 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5122 switch ((u << 2) | size) {
5123 case 0: /* VAND */
dd8fbd78 5124 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5125 break;
5126 case 1: /* BIC */
f669df27 5127 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5128 break;
5129 case 2: /* VORR */
dd8fbd78 5130 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5131 break;
5132 case 3: /* VORN */
f669df27 5133 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5134 break;
5135 case 4: /* VEOR */
dd8fbd78 5136 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5137 break;
5138 case 5: /* VBSL */
dd8fbd78
FN
5139 tmp3 = neon_load_reg(rd, pass);
5140 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5141 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5142 break;
5143 case 6: /* VBIT */
dd8fbd78
FN
5144 tmp3 = neon_load_reg(rd, pass);
5145 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5146 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5147 break;
5148 case 7: /* VBIF */
dd8fbd78
FN
5149 tmp3 = neon_load_reg(rd, pass);
5150 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5151 tcg_temp_free_i32(tmp3);
9ee6e8bb 5152 break;
2c0262af
FB
5153 }
5154 break;
62698be3 5155 case NEON_3R_VHSUB:
9ee6e8bb
PB
5156 GEN_NEON_INTEGER_OP(hsub);
5157 break;
62698be3 5158 case NEON_3R_VQSUB:
02da0b2d 5159 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5160 break;
62698be3 5161 case NEON_3R_VCGT:
9ee6e8bb
PB
5162 GEN_NEON_INTEGER_OP(cgt);
5163 break;
62698be3 5164 case NEON_3R_VCGE:
9ee6e8bb
PB
5165 GEN_NEON_INTEGER_OP(cge);
5166 break;
62698be3 5167 case NEON_3R_VSHL:
ad69471c 5168 GEN_NEON_INTEGER_OP(shl);
2c0262af 5169 break;
62698be3 5170 case NEON_3R_VQSHL:
02da0b2d 5171 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5172 break;
62698be3 5173 case NEON_3R_VRSHL:
ad69471c 5174 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5175 break;
62698be3 5176 case NEON_3R_VQRSHL:
02da0b2d 5177 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5178 break;
62698be3 5179 case NEON_3R_VMAX:
9ee6e8bb
PB
5180 GEN_NEON_INTEGER_OP(max);
5181 break;
62698be3 5182 case NEON_3R_VMIN:
9ee6e8bb
PB
5183 GEN_NEON_INTEGER_OP(min);
5184 break;
62698be3 5185 case NEON_3R_VABD:
9ee6e8bb
PB
5186 GEN_NEON_INTEGER_OP(abd);
5187 break;
62698be3 5188 case NEON_3R_VABA:
9ee6e8bb 5189 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5190 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5191 tmp2 = neon_load_reg(rd, pass);
5192 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5193 break;
62698be3 5194 case NEON_3R_VADD_VSUB:
9ee6e8bb 5195 if (!u) { /* VADD */
62698be3 5196 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5197 } else { /* VSUB */
5198 switch (size) {
dd8fbd78
FN
5199 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5200 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5201 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5202 default: abort();
9ee6e8bb
PB
5203 }
5204 }
5205 break;
62698be3 5206 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5207 if (!u) { /* VTST */
5208 switch (size) {
dd8fbd78
FN
5209 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5210 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5211 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5212 default: abort();
9ee6e8bb
PB
5213 }
5214 } else { /* VCEQ */
5215 switch (size) {
dd8fbd78
FN
5216 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5217 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5218 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5219 default: abort();
9ee6e8bb
PB
5220 }
5221 }
5222 break;
62698be3 5223 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5224 switch (size) {
dd8fbd78
FN
5225 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5226 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5227 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5228 default: abort();
9ee6e8bb 5229 }
7d1b0095 5230 tcg_temp_free_i32(tmp2);
dd8fbd78 5231 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5232 if (u) { /* VMLS */
dd8fbd78 5233 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5234 } else { /* VMLA */
dd8fbd78 5235 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5236 }
5237 break;
62698be3 5238 case NEON_3R_VMUL:
9ee6e8bb 5239 if (u) { /* polynomial */
dd8fbd78 5240 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5241 } else { /* Integer */
5242 switch (size) {
dd8fbd78
FN
5243 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5244 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5245 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5246 default: abort();
9ee6e8bb
PB
5247 }
5248 }
5249 break;
62698be3 5250 case NEON_3R_VPMAX:
9ee6e8bb
PB
5251 GEN_NEON_INTEGER_OP(pmax);
5252 break;
62698be3 5253 case NEON_3R_VPMIN:
9ee6e8bb
PB
5254 GEN_NEON_INTEGER_OP(pmin);
5255 break;
62698be3 5256 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5257 if (!u) { /* VQDMULH */
5258 switch (size) {
02da0b2d
PM
5259 case 1:
5260 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5261 break;
5262 case 2:
5263 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5264 break;
62698be3 5265 default: abort();
9ee6e8bb 5266 }
62698be3 5267 } else { /* VQRDMULH */
9ee6e8bb 5268 switch (size) {
02da0b2d
PM
5269 case 1:
5270 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5271 break;
5272 case 2:
5273 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5274 break;
62698be3 5275 default: abort();
9ee6e8bb
PB
5276 }
5277 }
5278 break;
62698be3 5279 case NEON_3R_VPADD:
9ee6e8bb 5280 switch (size) {
dd8fbd78
FN
5281 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5282 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5283 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5284 default: abort();
9ee6e8bb
PB
5285 }
5286 break;
62698be3 5287 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5288 {
5289 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5290 switch ((u << 2) | size) {
5291 case 0: /* VADD */
aa47cfdd
PM
5292 case 4: /* VPADD */
5293 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5294 break;
5295 case 2: /* VSUB */
aa47cfdd 5296 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5297 break;
5298 case 6: /* VABD */
aa47cfdd 5299 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5300 break;
5301 default:
62698be3 5302 abort();
9ee6e8bb 5303 }
aa47cfdd 5304 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5305 break;
aa47cfdd 5306 }
62698be3 5307 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5308 {
5309 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5310 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5311 if (!u) {
7d1b0095 5312 tcg_temp_free_i32(tmp2);
dd8fbd78 5313 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5314 if (size == 0) {
aa47cfdd 5315 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5316 } else {
aa47cfdd 5317 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5318 }
5319 }
aa47cfdd 5320 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5321 break;
aa47cfdd 5322 }
62698be3 5323 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5324 {
5325 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5326 if (!u) {
aa47cfdd 5327 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5328 } else {
aa47cfdd
PM
5329 if (size == 0) {
5330 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5331 } else {
5332 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5333 }
b5ff1b31 5334 }
aa47cfdd 5335 tcg_temp_free_ptr(fpstatus);
2c0262af 5336 break;
aa47cfdd 5337 }
62698be3 5338 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5339 {
5340 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5341 if (size == 0) {
5342 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5343 } else {
5344 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5345 }
5346 tcg_temp_free_ptr(fpstatus);
2c0262af 5347 break;
aa47cfdd 5348 }
62698be3 5349 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5350 {
5351 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5352 if (size == 0) {
f71a2ae5 5353 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5354 } else {
f71a2ae5 5355 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5356 }
5357 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5358 break;
aa47cfdd 5359 }
505935fc
WN
5360 case NEON_3R_FLOAT_MISC:
5361 if (u) {
5362 /* VMAXNM/VMINNM */
5363 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5364 if (size == 0) {
f71a2ae5 5365 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5366 } else {
f71a2ae5 5367 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5368 }
5369 tcg_temp_free_ptr(fpstatus);
5370 } else {
5371 if (size == 0) {
5372 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5373 } else {
5374 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5375 }
5376 }
2c0262af 5377 break;
da97f52c
PM
5378 case NEON_3R_VFM:
5379 {
5380 /* VFMA, VFMS: fused multiply-add */
5381 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5382 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5383 if (size) {
5384 /* VFMS */
5385 gen_helper_vfp_negs(tmp, tmp);
5386 }
5387 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5388 tcg_temp_free_i32(tmp3);
5389 tcg_temp_free_ptr(fpstatus);
5390 break;
5391 }
9ee6e8bb
PB
5392 default:
5393 abort();
2c0262af 5394 }
7d1b0095 5395 tcg_temp_free_i32(tmp2);
dd8fbd78 5396
9ee6e8bb
PB
5397 /* Save the result. For elementwise operations we can put it
5398 straight into the destination register. For pairwise operations
5399 we have to be careful to avoid clobbering the source operands. */
5400 if (pairwise && rd == rm) {
dd8fbd78 5401 neon_store_scratch(pass, tmp);
9ee6e8bb 5402 } else {
dd8fbd78 5403 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5404 }
5405
5406 } /* for pass */
5407 if (pairwise && rd == rm) {
5408 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5409 tmp = neon_load_scratch(pass);
5410 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5411 }
5412 }
ad69471c 5413 /* End of 3 register same size operations. */
9ee6e8bb
PB
5414 } else if (insn & (1 << 4)) {
5415 if ((insn & 0x00380080) != 0) {
5416 /* Two registers and shift. */
5417 op = (insn >> 8) & 0xf;
5418 if (insn & (1 << 7)) {
cc13115b
PM
5419 /* 64-bit shift. */
5420 if (op > 7) {
5421 return 1;
5422 }
9ee6e8bb
PB
5423 size = 3;
5424 } else {
5425 size = 2;
5426 while ((insn & (1 << (size + 19))) == 0)
5427 size--;
5428 }
5429 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5430 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5431 by immediate using the variable shift operations. */
5432 if (op < 8) {
5433 /* Shift by immediate:
5434 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5435 if (q && ((rd | rm) & 1)) {
5436 return 1;
5437 }
5438 if (!u && (op == 4 || op == 6)) {
5439 return 1;
5440 }
9ee6e8bb
PB
5441 /* Right shifts are encoded as N - shift, where N is the
5442 element size in bits. */
5443 if (op <= 4)
5444 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5445 if (size == 3) {
5446 count = q + 1;
5447 } else {
5448 count = q ? 4: 2;
5449 }
5450 switch (size) {
5451 case 0:
5452 imm = (uint8_t) shift;
5453 imm |= imm << 8;
5454 imm |= imm << 16;
5455 break;
5456 case 1:
5457 imm = (uint16_t) shift;
5458 imm |= imm << 16;
5459 break;
5460 case 2:
5461 case 3:
5462 imm = shift;
5463 break;
5464 default:
5465 abort();
5466 }
5467
5468 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5469 if (size == 3) {
5470 neon_load_reg64(cpu_V0, rm + pass);
5471 tcg_gen_movi_i64(cpu_V1, imm);
5472 switch (op) {
5473 case 0: /* VSHR */
5474 case 1: /* VSRA */
5475 if (u)
5476 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5477 else
ad69471c 5478 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5479 break;
ad69471c
PB
5480 case 2: /* VRSHR */
5481 case 3: /* VRSRA */
5482 if (u)
5483 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5484 else
ad69471c 5485 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5486 break;
ad69471c 5487 case 4: /* VSRI */
ad69471c
PB
5488 case 5: /* VSHL, VSLI */
5489 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5490 break;
0322b26e 5491 case 6: /* VQSHLU */
02da0b2d
PM
5492 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5493 cpu_V0, cpu_V1);
ad69471c 5494 break;
0322b26e
PM
5495 case 7: /* VQSHL */
5496 if (u) {
02da0b2d 5497 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5498 cpu_V0, cpu_V1);
5499 } else {
02da0b2d 5500 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5501 cpu_V0, cpu_V1);
5502 }
9ee6e8bb 5503 break;
9ee6e8bb 5504 }
ad69471c
PB
5505 if (op == 1 || op == 3) {
5506 /* Accumulate. */
5371cb81 5507 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5508 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5509 } else if (op == 4 || (op == 5 && u)) {
5510 /* Insert */
923e6509
CL
5511 neon_load_reg64(cpu_V1, rd + pass);
5512 uint64_t mask;
5513 if (shift < -63 || shift > 63) {
5514 mask = 0;
5515 } else {
5516 if (op == 4) {
5517 mask = 0xffffffffffffffffull >> -shift;
5518 } else {
5519 mask = 0xffffffffffffffffull << shift;
5520 }
5521 }
5522 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5523 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5524 }
5525 neon_store_reg64(cpu_V0, rd + pass);
5526 } else { /* size < 3 */
5527 /* Operands in T0 and T1. */
dd8fbd78 5528 tmp = neon_load_reg(rm, pass);
7d1b0095 5529 tmp2 = tcg_temp_new_i32();
dd8fbd78 5530 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5531 switch (op) {
5532 case 0: /* VSHR */
5533 case 1: /* VSRA */
5534 GEN_NEON_INTEGER_OP(shl);
5535 break;
5536 case 2: /* VRSHR */
5537 case 3: /* VRSRA */
5538 GEN_NEON_INTEGER_OP(rshl);
5539 break;
5540 case 4: /* VSRI */
ad69471c
PB
5541 case 5: /* VSHL, VSLI */
5542 switch (size) {
dd8fbd78
FN
5543 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5544 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5545 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5546 default: abort();
ad69471c
PB
5547 }
5548 break;
0322b26e 5549 case 6: /* VQSHLU */
ad69471c 5550 switch (size) {
0322b26e 5551 case 0:
02da0b2d
PM
5552 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5553 tmp, tmp2);
0322b26e
PM
5554 break;
5555 case 1:
02da0b2d
PM
5556 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5557 tmp, tmp2);
0322b26e
PM
5558 break;
5559 case 2:
02da0b2d
PM
5560 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5561 tmp, tmp2);
0322b26e
PM
5562 break;
5563 default:
cc13115b 5564 abort();
ad69471c
PB
5565 }
5566 break;
0322b26e 5567 case 7: /* VQSHL */
02da0b2d 5568 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5569 break;
ad69471c 5570 }
7d1b0095 5571 tcg_temp_free_i32(tmp2);
ad69471c
PB
5572
5573 if (op == 1 || op == 3) {
5574 /* Accumulate. */
dd8fbd78 5575 tmp2 = neon_load_reg(rd, pass);
5371cb81 5576 gen_neon_add(size, tmp, tmp2);
7d1b0095 5577 tcg_temp_free_i32(tmp2);
ad69471c
PB
5578 } else if (op == 4 || (op == 5 && u)) {
5579 /* Insert */
5580 switch (size) {
5581 case 0:
5582 if (op == 4)
ca9a32e4 5583 mask = 0xff >> -shift;
ad69471c 5584 else
ca9a32e4
JR
5585 mask = (uint8_t)(0xff << shift);
5586 mask |= mask << 8;
5587 mask |= mask << 16;
ad69471c
PB
5588 break;
5589 case 1:
5590 if (op == 4)
ca9a32e4 5591 mask = 0xffff >> -shift;
ad69471c 5592 else
ca9a32e4
JR
5593 mask = (uint16_t)(0xffff << shift);
5594 mask |= mask << 16;
ad69471c
PB
5595 break;
5596 case 2:
ca9a32e4
JR
5597 if (shift < -31 || shift > 31) {
5598 mask = 0;
5599 } else {
5600 if (op == 4)
5601 mask = 0xffffffffu >> -shift;
5602 else
5603 mask = 0xffffffffu << shift;
5604 }
ad69471c
PB
5605 break;
5606 default:
5607 abort();
5608 }
dd8fbd78 5609 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5610 tcg_gen_andi_i32(tmp, tmp, mask);
5611 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5612 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5613 tcg_temp_free_i32(tmp2);
ad69471c 5614 }
dd8fbd78 5615 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5616 }
5617 } /* for pass */
5618 } else if (op < 10) {
ad69471c 5619 /* Shift by immediate and narrow:
9ee6e8bb 5620 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5621 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5622 if (rm & 1) {
5623 return 1;
5624 }
9ee6e8bb
PB
5625 shift = shift - (1 << (size + 3));
5626 size++;
92cdfaeb 5627 if (size == 3) {
a7812ae4 5628 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5629 neon_load_reg64(cpu_V0, rm);
5630 neon_load_reg64(cpu_V1, rm + 1);
5631 for (pass = 0; pass < 2; pass++) {
5632 TCGv_i64 in;
5633 if (pass == 0) {
5634 in = cpu_V0;
5635 } else {
5636 in = cpu_V1;
5637 }
ad69471c 5638 if (q) {
0b36f4cd 5639 if (input_unsigned) {
92cdfaeb 5640 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5641 } else {
92cdfaeb 5642 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5643 }
ad69471c 5644 } else {
0b36f4cd 5645 if (input_unsigned) {
92cdfaeb 5646 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5647 } else {
92cdfaeb 5648 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5649 }
ad69471c 5650 }
7d1b0095 5651 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5652 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5653 neon_store_reg(rd, pass, tmp);
5654 } /* for pass */
5655 tcg_temp_free_i64(tmp64);
5656 } else {
5657 if (size == 1) {
5658 imm = (uint16_t)shift;
5659 imm |= imm << 16;
2c0262af 5660 } else {
92cdfaeb
PM
5661 /* size == 2 */
5662 imm = (uint32_t)shift;
5663 }
5664 tmp2 = tcg_const_i32(imm);
5665 tmp4 = neon_load_reg(rm + 1, 0);
5666 tmp5 = neon_load_reg(rm + 1, 1);
5667 for (pass = 0; pass < 2; pass++) {
5668 if (pass == 0) {
5669 tmp = neon_load_reg(rm, 0);
5670 } else {
5671 tmp = tmp4;
5672 }
0b36f4cd
CL
5673 gen_neon_shift_narrow(size, tmp, tmp2, q,
5674 input_unsigned);
92cdfaeb
PM
5675 if (pass == 0) {
5676 tmp3 = neon_load_reg(rm, 1);
5677 } else {
5678 tmp3 = tmp5;
5679 }
0b36f4cd
CL
5680 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5681 input_unsigned);
36aa55dc 5682 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5683 tcg_temp_free_i32(tmp);
5684 tcg_temp_free_i32(tmp3);
5685 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5686 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5687 neon_store_reg(rd, pass, tmp);
5688 } /* for pass */
c6067f04 5689 tcg_temp_free_i32(tmp2);
b75263d6 5690 }
9ee6e8bb 5691 } else if (op == 10) {
cc13115b
PM
5692 /* VSHLL, VMOVL */
5693 if (q || (rd & 1)) {
9ee6e8bb 5694 return 1;
cc13115b 5695 }
ad69471c
PB
5696 tmp = neon_load_reg(rm, 0);
5697 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5698 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5699 if (pass == 1)
5700 tmp = tmp2;
5701
5702 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5703
9ee6e8bb
PB
5704 if (shift != 0) {
5705 /* The shift is less than the width of the source
ad69471c
PB
5706 type, so we can just shift the whole register. */
5707 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5708 /* Widen the result of shift: we need to clear
5709 * the potential overflow bits resulting from
5710 * left bits of the narrow input appearing as
5711 * right bits of left the neighbour narrow
5712 * input. */
ad69471c
PB
5713 if (size < 2 || !u) {
5714 uint64_t imm64;
5715 if (size == 0) {
5716 imm = (0xffu >> (8 - shift));
5717 imm |= imm << 16;
acdf01ef 5718 } else if (size == 1) {
ad69471c 5719 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5720 } else {
5721 /* size == 2 */
5722 imm = 0xffffffff >> (32 - shift);
5723 }
5724 if (size < 2) {
5725 imm64 = imm | (((uint64_t)imm) << 32);
5726 } else {
5727 imm64 = imm;
9ee6e8bb 5728 }
acdf01ef 5729 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5730 }
5731 }
ad69471c 5732 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5733 }
f73534a5 5734 } else if (op >= 14) {
9ee6e8bb 5735 /* VCVT fixed-point. */
cc13115b
PM
5736 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5737 return 1;
5738 }
f73534a5
PM
5739 /* We have already masked out the must-be-1 top bit of imm6,
5740 * hence this 32-shift where the ARM ARM has 64-imm6.
5741 */
5742 shift = 32 - shift;
9ee6e8bb 5743 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5744 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5745 if (!(op & 1)) {
9ee6e8bb 5746 if (u)
5500b06c 5747 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5748 else
5500b06c 5749 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5750 } else {
5751 if (u)
5500b06c 5752 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5753 else
5500b06c 5754 gen_vfp_tosl(0, shift, 1);
2c0262af 5755 }
4373f3ce 5756 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5757 }
5758 } else {
9ee6e8bb
PB
5759 return 1;
5760 }
5761 } else { /* (insn & 0x00380080) == 0 */
5762 int invert;
7d80fee5
PM
5763 if (q && (rd & 1)) {
5764 return 1;
5765 }
9ee6e8bb
PB
5766
5767 op = (insn >> 8) & 0xf;
5768 /* One register and immediate. */
5769 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5770 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5771 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5772 * We choose to not special-case this and will behave as if a
5773 * valid constant encoding of 0 had been given.
5774 */
9ee6e8bb
PB
5775 switch (op) {
5776 case 0: case 1:
5777 /* no-op */
5778 break;
5779 case 2: case 3:
5780 imm <<= 8;
5781 break;
5782 case 4: case 5:
5783 imm <<= 16;
5784 break;
5785 case 6: case 7:
5786 imm <<= 24;
5787 break;
5788 case 8: case 9:
5789 imm |= imm << 16;
5790 break;
5791 case 10: case 11:
5792 imm = (imm << 8) | (imm << 24);
5793 break;
5794 case 12:
8e31209e 5795 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5796 break;
5797 case 13:
5798 imm = (imm << 16) | 0xffff;
5799 break;
5800 case 14:
5801 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5802 if (invert)
5803 imm = ~imm;
5804 break;
5805 case 15:
7d80fee5
PM
5806 if (invert) {
5807 return 1;
5808 }
9ee6e8bb
PB
5809 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5810 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5811 break;
5812 }
5813 if (invert)
5814 imm = ~imm;
5815
9ee6e8bb
PB
5816 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5817 if (op & 1 && op < 12) {
ad69471c 5818 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5819 if (invert) {
5820 /* The immediate value has already been inverted, so
5821 BIC becomes AND. */
ad69471c 5822 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5823 } else {
ad69471c 5824 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5825 }
9ee6e8bb 5826 } else {
ad69471c 5827 /* VMOV, VMVN. */
7d1b0095 5828 tmp = tcg_temp_new_i32();
9ee6e8bb 5829 if (op == 14 && invert) {
a5a14945 5830 int n;
ad69471c
PB
5831 uint32_t val;
5832 val = 0;
9ee6e8bb
PB
5833 for (n = 0; n < 4; n++) {
5834 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5835 val |= 0xff << (n * 8);
9ee6e8bb 5836 }
ad69471c
PB
5837 tcg_gen_movi_i32(tmp, val);
5838 } else {
5839 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5840 }
9ee6e8bb 5841 }
ad69471c 5842 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5843 }
5844 }
e4b3861d 5845 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5846 if (size != 3) {
5847 op = (insn >> 8) & 0xf;
5848 if ((insn & (1 << 6)) == 0) {
5849 /* Three registers of different lengths. */
5850 int src1_wide;
5851 int src2_wide;
5852 int prewiden;
695272dc
PM
5853 /* undefreq: bit 0 : UNDEF if size != 0
5854 * bit 1 : UNDEF if size == 0
5855 * bit 2 : UNDEF if U == 1
5856 * Note that [1:0] set implies 'always UNDEF'
5857 */
5858 int undefreq;
5859 /* prewiden, src1_wide, src2_wide, undefreq */
5860 static const int neon_3reg_wide[16][4] = {
5861 {1, 0, 0, 0}, /* VADDL */
5862 {1, 1, 0, 0}, /* VADDW */
5863 {1, 0, 0, 0}, /* VSUBL */
5864 {1, 1, 0, 0}, /* VSUBW */
5865 {0, 1, 1, 0}, /* VADDHN */
5866 {0, 0, 0, 0}, /* VABAL */
5867 {0, 1, 1, 0}, /* VSUBHN */
5868 {0, 0, 0, 0}, /* VABDL */
5869 {0, 0, 0, 0}, /* VMLAL */
5870 {0, 0, 0, 6}, /* VQDMLAL */
5871 {0, 0, 0, 0}, /* VMLSL */
5872 {0, 0, 0, 6}, /* VQDMLSL */
5873 {0, 0, 0, 0}, /* Integer VMULL */
5874 {0, 0, 0, 2}, /* VQDMULL */
5875 {0, 0, 0, 5}, /* Polynomial VMULL */
5876 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5877 };
5878
5879 prewiden = neon_3reg_wide[op][0];
5880 src1_wide = neon_3reg_wide[op][1];
5881 src2_wide = neon_3reg_wide[op][2];
695272dc 5882 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5883
695272dc
PM
5884 if (((undefreq & 1) && (size != 0)) ||
5885 ((undefreq & 2) && (size == 0)) ||
5886 ((undefreq & 4) && u)) {
5887 return 1;
5888 }
5889 if ((src1_wide && (rn & 1)) ||
5890 (src2_wide && (rm & 1)) ||
5891 (!src2_wide && (rd & 1))) {
ad69471c 5892 return 1;
695272dc 5893 }
ad69471c 5894
9ee6e8bb
PB
5895 /* Avoid overlapping operands. Wide source operands are
5896 always aligned so will never overlap with wide
5897 destinations in problematic ways. */
8f8e3aa4 5898 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5899 tmp = neon_load_reg(rm, 1);
5900 neon_store_scratch(2, tmp);
8f8e3aa4 5901 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5902 tmp = neon_load_reg(rn, 1);
5903 neon_store_scratch(2, tmp);
9ee6e8bb 5904 }
39d5492a 5905 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5906 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5907 if (src1_wide) {
5908 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5909 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5910 } else {
ad69471c 5911 if (pass == 1 && rd == rn) {
dd8fbd78 5912 tmp = neon_load_scratch(2);
9ee6e8bb 5913 } else {
ad69471c
PB
5914 tmp = neon_load_reg(rn, pass);
5915 }
5916 if (prewiden) {
5917 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5918 }
5919 }
ad69471c
PB
5920 if (src2_wide) {
5921 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5922 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5923 } else {
ad69471c 5924 if (pass == 1 && rd == rm) {
dd8fbd78 5925 tmp2 = neon_load_scratch(2);
9ee6e8bb 5926 } else {
ad69471c
PB
5927 tmp2 = neon_load_reg(rm, pass);
5928 }
5929 if (prewiden) {
5930 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5931 }
9ee6e8bb
PB
5932 }
5933 switch (op) {
5934 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5935 gen_neon_addl(size);
9ee6e8bb 5936 break;
79b0e534 5937 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5938 gen_neon_subl(size);
9ee6e8bb
PB
5939 break;
5940 case 5: case 7: /* VABAL, VABDL */
5941 switch ((size << 1) | u) {
ad69471c
PB
5942 case 0:
5943 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5944 break;
5945 case 1:
5946 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5947 break;
5948 case 2:
5949 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5950 break;
5951 case 3:
5952 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5953 break;
5954 case 4:
5955 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5956 break;
5957 case 5:
5958 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5959 break;
9ee6e8bb
PB
5960 default: abort();
5961 }
7d1b0095
PM
5962 tcg_temp_free_i32(tmp2);
5963 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5964 break;
5965 case 8: case 9: case 10: case 11: case 12: case 13:
5966 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5967 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5968 break;
5969 case 14: /* Polynomial VMULL */
e5ca24cb 5970 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5971 tcg_temp_free_i32(tmp2);
5972 tcg_temp_free_i32(tmp);
e5ca24cb 5973 break;
695272dc
PM
5974 default: /* 15 is RESERVED: caught earlier */
5975 abort();
9ee6e8bb 5976 }
ebcd88ce
PM
5977 if (op == 13) {
5978 /* VQDMULL */
5979 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5980 neon_store_reg64(cpu_V0, rd + pass);
5981 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5982 /* Accumulate. */
ebcd88ce 5983 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5984 switch (op) {
4dc064e6
PM
5985 case 10: /* VMLSL */
5986 gen_neon_negl(cpu_V0, size);
5987 /* Fall through */
5988 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5989 gen_neon_addl(size);
9ee6e8bb
PB
5990 break;
5991 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5992 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5993 if (op == 11) {
5994 gen_neon_negl(cpu_V0, size);
5995 }
ad69471c
PB
5996 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5997 break;
9ee6e8bb
PB
5998 default:
5999 abort();
6000 }
ad69471c 6001 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6002 } else if (op == 4 || op == 6) {
6003 /* Narrowing operation. */
7d1b0095 6004 tmp = tcg_temp_new_i32();
79b0e534 6005 if (!u) {
9ee6e8bb 6006 switch (size) {
ad69471c
PB
6007 case 0:
6008 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6009 break;
6010 case 1:
6011 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6012 break;
6013 case 2:
6014 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6015 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6016 break;
9ee6e8bb
PB
6017 default: abort();
6018 }
6019 } else {
6020 switch (size) {
ad69471c
PB
6021 case 0:
6022 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6023 break;
6024 case 1:
6025 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6026 break;
6027 case 2:
6028 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6029 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6030 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6031 break;
9ee6e8bb
PB
6032 default: abort();
6033 }
6034 }
ad69471c
PB
6035 if (pass == 0) {
6036 tmp3 = tmp;
6037 } else {
6038 neon_store_reg(rd, 0, tmp3);
6039 neon_store_reg(rd, 1, tmp);
6040 }
9ee6e8bb
PB
6041 } else {
6042 /* Write back the result. */
ad69471c 6043 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6044 }
6045 }
6046 } else {
3e3326df
PM
6047 /* Two registers and a scalar. NB that for ops of this form
6048 * the ARM ARM labels bit 24 as Q, but it is in our variable
6049 * 'u', not 'q'.
6050 */
6051 if (size == 0) {
6052 return 1;
6053 }
9ee6e8bb 6054 switch (op) {
9ee6e8bb 6055 case 1: /* Float VMLA scalar */
9ee6e8bb 6056 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6057 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6058 if (size == 1) {
6059 return 1;
6060 }
6061 /* fall through */
6062 case 0: /* Integer VMLA scalar */
6063 case 4: /* Integer VMLS scalar */
6064 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6065 case 12: /* VQDMULH scalar */
6066 case 13: /* VQRDMULH scalar */
3e3326df
PM
6067 if (u && ((rd | rn) & 1)) {
6068 return 1;
6069 }
dd8fbd78
FN
6070 tmp = neon_get_scalar(size, rm);
6071 neon_store_scratch(0, tmp);
9ee6e8bb 6072 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6073 tmp = neon_load_scratch(0);
6074 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6075 if (op == 12) {
6076 if (size == 1) {
02da0b2d 6077 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6078 } else {
02da0b2d 6079 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6080 }
6081 } else if (op == 13) {
6082 if (size == 1) {
02da0b2d 6083 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6084 } else {
02da0b2d 6085 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6086 }
6087 } else if (op & 1) {
aa47cfdd
PM
6088 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6089 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6090 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6091 } else {
6092 switch (size) {
dd8fbd78
FN
6093 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6094 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6095 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6096 default: abort();
9ee6e8bb
PB
6097 }
6098 }
7d1b0095 6099 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6100 if (op < 8) {
6101 /* Accumulate. */
dd8fbd78 6102 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6103 switch (op) {
6104 case 0:
dd8fbd78 6105 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6106 break;
6107 case 1:
aa47cfdd
PM
6108 {
6109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6110 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6111 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6112 break;
aa47cfdd 6113 }
9ee6e8bb 6114 case 4:
dd8fbd78 6115 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6116 break;
6117 case 5:
aa47cfdd
PM
6118 {
6119 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6120 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6121 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6122 break;
aa47cfdd 6123 }
9ee6e8bb
PB
6124 default:
6125 abort();
6126 }
7d1b0095 6127 tcg_temp_free_i32(tmp2);
9ee6e8bb 6128 }
dd8fbd78 6129 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6130 }
6131 break;
9ee6e8bb 6132 case 3: /* VQDMLAL scalar */
9ee6e8bb 6133 case 7: /* VQDMLSL scalar */
9ee6e8bb 6134 case 11: /* VQDMULL scalar */
3e3326df 6135 if (u == 1) {
ad69471c 6136 return 1;
3e3326df
PM
6137 }
6138 /* fall through */
6139 case 2: /* VMLAL sclar */
6140 case 6: /* VMLSL scalar */
6141 case 10: /* VMULL scalar */
6142 if (rd & 1) {
6143 return 1;
6144 }
dd8fbd78 6145 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6146 /* We need a copy of tmp2 because gen_neon_mull
6147 * deletes it during pass 0. */
7d1b0095 6148 tmp4 = tcg_temp_new_i32();
c6067f04 6149 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6150 tmp3 = neon_load_reg(rn, 1);
ad69471c 6151
9ee6e8bb 6152 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6153 if (pass == 0) {
6154 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6155 } else {
dd8fbd78 6156 tmp = tmp3;
c6067f04 6157 tmp2 = tmp4;
9ee6e8bb 6158 }
ad69471c 6159 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6160 if (op != 11) {
6161 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6162 }
9ee6e8bb 6163 switch (op) {
4dc064e6
PM
6164 case 6:
6165 gen_neon_negl(cpu_V0, size);
6166 /* Fall through */
6167 case 2:
ad69471c 6168 gen_neon_addl(size);
9ee6e8bb
PB
6169 break;
6170 case 3: case 7:
ad69471c 6171 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6172 if (op == 7) {
6173 gen_neon_negl(cpu_V0, size);
6174 }
ad69471c 6175 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6176 break;
6177 case 10:
6178 /* no-op */
6179 break;
6180 case 11:
ad69471c 6181 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6182 break;
6183 default:
6184 abort();
6185 }
ad69471c 6186 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6187 }
dd8fbd78 6188
dd8fbd78 6189
9ee6e8bb
PB
6190 break;
6191 default: /* 14 and 15 are RESERVED */
6192 return 1;
6193 }
6194 }
6195 } else { /* size == 3 */
6196 if (!u) {
6197 /* Extract. */
9ee6e8bb 6198 imm = (insn >> 8) & 0xf;
ad69471c
PB
6199
6200 if (imm > 7 && !q)
6201 return 1;
6202
52579ea1
PM
6203 if (q && ((rd | rn | rm) & 1)) {
6204 return 1;
6205 }
6206
ad69471c
PB
6207 if (imm == 0) {
6208 neon_load_reg64(cpu_V0, rn);
6209 if (q) {
6210 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6211 }
ad69471c
PB
6212 } else if (imm == 8) {
6213 neon_load_reg64(cpu_V0, rn + 1);
6214 if (q) {
6215 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6216 }
ad69471c 6217 } else if (q) {
a7812ae4 6218 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6219 if (imm < 8) {
6220 neon_load_reg64(cpu_V0, rn);
a7812ae4 6221 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6222 } else {
6223 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6224 neon_load_reg64(tmp64, rm);
ad69471c
PB
6225 }
6226 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6227 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6228 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6229 if (imm < 8) {
6230 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6231 } else {
ad69471c
PB
6232 neon_load_reg64(cpu_V1, rm + 1);
6233 imm -= 8;
9ee6e8bb 6234 }
ad69471c 6235 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6236 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6237 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6238 tcg_temp_free_i64(tmp64);
ad69471c 6239 } else {
a7812ae4 6240 /* BUGFIX */
ad69471c 6241 neon_load_reg64(cpu_V0, rn);
a7812ae4 6242 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6243 neon_load_reg64(cpu_V1, rm);
a7812ae4 6244 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6245 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6246 }
6247 neon_store_reg64(cpu_V0, rd);
6248 if (q) {
6249 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6250 }
6251 } else if ((insn & (1 << 11)) == 0) {
6252 /* Two register misc. */
6253 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6254 size = (insn >> 18) & 3;
600b828c
PM
6255 /* UNDEF for unknown op values and bad op-size combinations */
6256 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6257 return 1;
6258 }
fc2a9b37
PM
6259 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6260 q && ((rm | rd) & 1)) {
6261 return 1;
6262 }
9ee6e8bb 6263 switch (op) {
600b828c 6264 case NEON_2RM_VREV64:
9ee6e8bb 6265 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6266 tmp = neon_load_reg(rm, pass * 2);
6267 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6268 switch (size) {
dd8fbd78
FN
6269 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6270 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6271 case 2: /* no-op */ break;
6272 default: abort();
6273 }
dd8fbd78 6274 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6275 if (size == 2) {
dd8fbd78 6276 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6277 } else {
9ee6e8bb 6278 switch (size) {
dd8fbd78
FN
6279 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6280 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6281 default: abort();
6282 }
dd8fbd78 6283 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6284 }
6285 }
6286 break;
600b828c
PM
6287 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6288 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6289 for (pass = 0; pass < q + 1; pass++) {
6290 tmp = neon_load_reg(rm, pass * 2);
6291 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6292 tmp = neon_load_reg(rm, pass * 2 + 1);
6293 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6294 switch (size) {
6295 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6296 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6297 case 2: tcg_gen_add_i64(CPU_V001); break;
6298 default: abort();
6299 }
600b828c 6300 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6301 /* Accumulate. */
ad69471c
PB
6302 neon_load_reg64(cpu_V1, rd + pass);
6303 gen_neon_addl(size);
9ee6e8bb 6304 }
ad69471c 6305 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6306 }
6307 break;
600b828c 6308 case NEON_2RM_VTRN:
9ee6e8bb 6309 if (size == 2) {
a5a14945 6310 int n;
9ee6e8bb 6311 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6312 tmp = neon_load_reg(rm, n);
6313 tmp2 = neon_load_reg(rd, n + 1);
6314 neon_store_reg(rm, n, tmp2);
6315 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6316 }
6317 } else {
6318 goto elementwise;
6319 }
6320 break;
600b828c 6321 case NEON_2RM_VUZP:
02acedf9 6322 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6323 return 1;
9ee6e8bb
PB
6324 }
6325 break;
600b828c 6326 case NEON_2RM_VZIP:
d68a6f3a 6327 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6328 return 1;
9ee6e8bb
PB
6329 }
6330 break;
600b828c
PM
6331 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6332 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6333 if (rm & 1) {
6334 return 1;
6335 }
39d5492a 6336 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6337 for (pass = 0; pass < 2; pass++) {
ad69471c 6338 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6339 tmp = tcg_temp_new_i32();
600b828c
PM
6340 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6341 tmp, cpu_V0);
ad69471c
PB
6342 if (pass == 0) {
6343 tmp2 = tmp;
6344 } else {
6345 neon_store_reg(rd, 0, tmp2);
6346 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6347 }
9ee6e8bb
PB
6348 }
6349 break;
600b828c 6350 case NEON_2RM_VSHLL:
fc2a9b37 6351 if (q || (rd & 1)) {
9ee6e8bb 6352 return 1;
600b828c 6353 }
ad69471c
PB
6354 tmp = neon_load_reg(rm, 0);
6355 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6356 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6357 if (pass == 1)
6358 tmp = tmp2;
6359 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6360 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6361 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6362 }
6363 break;
600b828c 6364 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6365 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6366 q || (rm & 1)) {
6367 return 1;
6368 }
7d1b0095
PM
6369 tmp = tcg_temp_new_i32();
6370 tmp2 = tcg_temp_new_i32();
60011498 6371 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6372 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6373 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6374 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6375 tcg_gen_shli_i32(tmp2, tmp2, 16);
6376 tcg_gen_or_i32(tmp2, tmp2, tmp);
6377 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6378 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6379 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6380 neon_store_reg(rd, 0, tmp2);
7d1b0095 6381 tmp2 = tcg_temp_new_i32();
2d981da7 6382 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6383 tcg_gen_shli_i32(tmp2, tmp2, 16);
6384 tcg_gen_or_i32(tmp2, tmp2, tmp);
6385 neon_store_reg(rd, 1, tmp2);
7d1b0095 6386 tcg_temp_free_i32(tmp);
60011498 6387 break;
600b828c 6388 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6389 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6390 q || (rd & 1)) {
6391 return 1;
6392 }
7d1b0095 6393 tmp3 = tcg_temp_new_i32();
60011498
PB
6394 tmp = neon_load_reg(rm, 0);
6395 tmp2 = neon_load_reg(rm, 1);
6396 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6397 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6398 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6399 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6400 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6401 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6402 tcg_temp_free_i32(tmp);
60011498 6403 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6404 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6405 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6406 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6407 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6408 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6409 tcg_temp_free_i32(tmp2);
6410 tcg_temp_free_i32(tmp3);
60011498 6411 break;
9d935509
AB
6412 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6413 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6414 || ((rm | rd) & 1)) {
6415 return 1;
6416 }
6417 tmp = tcg_const_i32(rd);
6418 tmp2 = tcg_const_i32(rm);
6419
6420 /* Bit 6 is the lowest opcode bit; it distinguishes between
6421 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6422 */
6423 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6424
6425 if (op == NEON_2RM_AESE) {
6426 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6427 } else {
6428 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6429 }
6430 tcg_temp_free_i32(tmp);
6431 tcg_temp_free_i32(tmp2);
6432 tcg_temp_free_i32(tmp3);
6433 break;
9ee6e8bb
PB
6434 default:
6435 elementwise:
6436 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6437 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6438 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6439 neon_reg_offset(rm, pass));
39d5492a 6440 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6441 } else {
dd8fbd78 6442 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6443 }
6444 switch (op) {
600b828c 6445 case NEON_2RM_VREV32:
9ee6e8bb 6446 switch (size) {
dd8fbd78
FN
6447 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6448 case 1: gen_swap_half(tmp); break;
600b828c 6449 default: abort();
9ee6e8bb
PB
6450 }
6451 break;
600b828c 6452 case NEON_2RM_VREV16:
dd8fbd78 6453 gen_rev16(tmp);
9ee6e8bb 6454 break;
600b828c 6455 case NEON_2RM_VCLS:
9ee6e8bb 6456 switch (size) {
dd8fbd78
FN
6457 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6458 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6459 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6460 default: abort();
9ee6e8bb
PB
6461 }
6462 break;
600b828c 6463 case NEON_2RM_VCLZ:
9ee6e8bb 6464 switch (size) {
dd8fbd78
FN
6465 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6466 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6467 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6468 default: abort();
9ee6e8bb
PB
6469 }
6470 break;
600b828c 6471 case NEON_2RM_VCNT:
dd8fbd78 6472 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6473 break;
600b828c 6474 case NEON_2RM_VMVN:
dd8fbd78 6475 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6476 break;
600b828c 6477 case NEON_2RM_VQABS:
9ee6e8bb 6478 switch (size) {
02da0b2d
PM
6479 case 0:
6480 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6481 break;
6482 case 1:
6483 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6484 break;
6485 case 2:
6486 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6487 break;
600b828c 6488 default: abort();
9ee6e8bb
PB
6489 }
6490 break;
600b828c 6491 case NEON_2RM_VQNEG:
9ee6e8bb 6492 switch (size) {
02da0b2d
PM
6493 case 0:
6494 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6495 break;
6496 case 1:
6497 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6498 break;
6499 case 2:
6500 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6501 break;
600b828c 6502 default: abort();
9ee6e8bb
PB
6503 }
6504 break;
600b828c 6505 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6506 tmp2 = tcg_const_i32(0);
9ee6e8bb 6507 switch(size) {
dd8fbd78
FN
6508 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6509 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6510 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6511 default: abort();
9ee6e8bb 6512 }
39d5492a 6513 tcg_temp_free_i32(tmp2);
600b828c 6514 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6515 tcg_gen_not_i32(tmp, tmp);
600b828c 6516 }
9ee6e8bb 6517 break;
600b828c 6518 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6519 tmp2 = tcg_const_i32(0);
9ee6e8bb 6520 switch(size) {
dd8fbd78
FN
6521 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6522 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6523 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6524 default: abort();
9ee6e8bb 6525 }
39d5492a 6526 tcg_temp_free_i32(tmp2);
600b828c 6527 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6528 tcg_gen_not_i32(tmp, tmp);
600b828c 6529 }
9ee6e8bb 6530 break;
600b828c 6531 case NEON_2RM_VCEQ0:
dd8fbd78 6532 tmp2 = tcg_const_i32(0);
9ee6e8bb 6533 switch(size) {
dd8fbd78
FN
6534 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6535 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6536 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6537 default: abort();
9ee6e8bb 6538 }
39d5492a 6539 tcg_temp_free_i32(tmp2);
9ee6e8bb 6540 break;
600b828c 6541 case NEON_2RM_VABS:
9ee6e8bb 6542 switch(size) {
dd8fbd78
FN
6543 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6544 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6545 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6546 default: abort();
9ee6e8bb
PB
6547 }
6548 break;
600b828c 6549 case NEON_2RM_VNEG:
dd8fbd78
FN
6550 tmp2 = tcg_const_i32(0);
6551 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6552 tcg_temp_free_i32(tmp2);
9ee6e8bb 6553 break;
600b828c 6554 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6555 {
6556 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6557 tmp2 = tcg_const_i32(0);
aa47cfdd 6558 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6559 tcg_temp_free_i32(tmp2);
aa47cfdd 6560 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6561 break;
aa47cfdd 6562 }
600b828c 6563 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6564 {
6565 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6566 tmp2 = tcg_const_i32(0);
aa47cfdd 6567 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6568 tcg_temp_free_i32(tmp2);
aa47cfdd 6569 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6570 break;
aa47cfdd 6571 }
600b828c 6572 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6573 {
6574 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6575 tmp2 = tcg_const_i32(0);
aa47cfdd 6576 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6577 tcg_temp_free_i32(tmp2);
aa47cfdd 6578 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6579 break;
aa47cfdd 6580 }
600b828c 6581 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6582 {
6583 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6584 tmp2 = tcg_const_i32(0);
aa47cfdd 6585 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6586 tcg_temp_free_i32(tmp2);
aa47cfdd 6587 tcg_temp_free_ptr(fpstatus);
0e326109 6588 break;
aa47cfdd 6589 }
600b828c 6590 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6591 {
6592 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6593 tmp2 = tcg_const_i32(0);
aa47cfdd 6594 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6595 tcg_temp_free_i32(tmp2);
aa47cfdd 6596 tcg_temp_free_ptr(fpstatus);
0e326109 6597 break;
aa47cfdd 6598 }
600b828c 6599 case NEON_2RM_VABS_F:
4373f3ce 6600 gen_vfp_abs(0);
9ee6e8bb 6601 break;
600b828c 6602 case NEON_2RM_VNEG_F:
4373f3ce 6603 gen_vfp_neg(0);
9ee6e8bb 6604 break;
600b828c 6605 case NEON_2RM_VSWP:
dd8fbd78
FN
6606 tmp2 = neon_load_reg(rd, pass);
6607 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6608 break;
600b828c 6609 case NEON_2RM_VTRN:
dd8fbd78 6610 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6611 switch (size) {
dd8fbd78
FN
6612 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6613 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6614 default: abort();
9ee6e8bb 6615 }
dd8fbd78 6616 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6617 break;
34f7b0a2
WN
6618 case NEON_2RM_VRINTN:
6619 case NEON_2RM_VRINTA:
6620 case NEON_2RM_VRINTM:
6621 case NEON_2RM_VRINTP:
6622 case NEON_2RM_VRINTZ:
6623 {
6624 TCGv_i32 tcg_rmode;
6625 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6626 int rmode;
6627
6628 if (op == NEON_2RM_VRINTZ) {
6629 rmode = FPROUNDING_ZERO;
6630 } else {
6631 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6632 }
6633
6634 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6635 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6636 cpu_env);
6637 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6638 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6639 cpu_env);
6640 tcg_temp_free_ptr(fpstatus);
6641 tcg_temp_free_i32(tcg_rmode);
6642 break;
6643 }
2ce70625
WN
6644 case NEON_2RM_VRINTX:
6645 {
6646 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6647 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6648 tcg_temp_free_ptr(fpstatus);
6649 break;
6650 }
901ad525
WN
6651 case NEON_2RM_VCVTAU:
6652 case NEON_2RM_VCVTAS:
6653 case NEON_2RM_VCVTNU:
6654 case NEON_2RM_VCVTNS:
6655 case NEON_2RM_VCVTPU:
6656 case NEON_2RM_VCVTPS:
6657 case NEON_2RM_VCVTMU:
6658 case NEON_2RM_VCVTMS:
6659 {
6660 bool is_signed = !extract32(insn, 7, 1);
6661 TCGv_ptr fpst = get_fpstatus_ptr(1);
6662 TCGv_i32 tcg_rmode, tcg_shift;
6663 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6664
6665 tcg_shift = tcg_const_i32(0);
6666 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6667 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6668 cpu_env);
6669
6670 if (is_signed) {
6671 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6672 tcg_shift, fpst);
6673 } else {
6674 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6675 tcg_shift, fpst);
6676 }
6677
6678 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6679 cpu_env);
6680 tcg_temp_free_i32(tcg_rmode);
6681 tcg_temp_free_i32(tcg_shift);
6682 tcg_temp_free_ptr(fpst);
6683 break;
6684 }
600b828c 6685 case NEON_2RM_VRECPE:
b6d4443a
AB
6686 {
6687 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6688 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6689 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6690 break;
b6d4443a 6691 }
600b828c 6692 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6693 {
6694 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6695 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6696 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6697 break;
c2fb418e 6698 }
600b828c 6699 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6700 {
6701 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6702 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6703 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6704 break;
b6d4443a 6705 }
600b828c 6706 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6707 {
6708 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6709 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6710 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6711 break;
c2fb418e 6712 }
600b828c 6713 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6714 gen_vfp_sito(0, 1);
9ee6e8bb 6715 break;
600b828c 6716 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6717 gen_vfp_uito(0, 1);
9ee6e8bb 6718 break;
600b828c 6719 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6720 gen_vfp_tosiz(0, 1);
9ee6e8bb 6721 break;
600b828c 6722 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6723 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6724 break;
6725 default:
600b828c
PM
6726 /* Reserved op values were caught by the
6727 * neon_2rm_sizes[] check earlier.
6728 */
6729 abort();
9ee6e8bb 6730 }
600b828c 6731 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6732 tcg_gen_st_f32(cpu_F0s, cpu_env,
6733 neon_reg_offset(rd, pass));
9ee6e8bb 6734 } else {
dd8fbd78 6735 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6736 }
6737 }
6738 break;
6739 }
6740 } else if ((insn & (1 << 10)) == 0) {
6741 /* VTBL, VTBX. */
56907d77
PM
6742 int n = ((insn >> 8) & 3) + 1;
6743 if ((rn + n) > 32) {
6744 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6745 * helper function running off the end of the register file.
6746 */
6747 return 1;
6748 }
6749 n <<= 3;
9ee6e8bb 6750 if (insn & (1 << 6)) {
8f8e3aa4 6751 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6752 } else {
7d1b0095 6753 tmp = tcg_temp_new_i32();
8f8e3aa4 6754 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6755 }
8f8e3aa4 6756 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6757 tmp4 = tcg_const_i32(rn);
6758 tmp5 = tcg_const_i32(n);
9ef39277 6759 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6760 tcg_temp_free_i32(tmp);
9ee6e8bb 6761 if (insn & (1 << 6)) {
8f8e3aa4 6762 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6763 } else {
7d1b0095 6764 tmp = tcg_temp_new_i32();
8f8e3aa4 6765 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6766 }
8f8e3aa4 6767 tmp3 = neon_load_reg(rm, 1);
9ef39277 6768 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6769 tcg_temp_free_i32(tmp5);
6770 tcg_temp_free_i32(tmp4);
8f8e3aa4 6771 neon_store_reg(rd, 0, tmp2);
3018f259 6772 neon_store_reg(rd, 1, tmp3);
7d1b0095 6773 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6774 } else if ((insn & 0x380) == 0) {
6775 /* VDUP */
133da6aa
JR
6776 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6777 return 1;
6778 }
9ee6e8bb 6779 if (insn & (1 << 19)) {
dd8fbd78 6780 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6781 } else {
dd8fbd78 6782 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6783 }
6784 if (insn & (1 << 16)) {
dd8fbd78 6785 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6786 } else if (insn & (1 << 17)) {
6787 if ((insn >> 18) & 1)
dd8fbd78 6788 gen_neon_dup_high16(tmp);
9ee6e8bb 6789 else
dd8fbd78 6790 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6791 }
6792 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6793 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6794 tcg_gen_mov_i32(tmp2, tmp);
6795 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6796 }
7d1b0095 6797 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6798 } else {
6799 return 1;
6800 }
6801 }
6802 }
6803 return 0;
6804}
6805
/* Decode a coprocessor instruction (MRC/MCR/MRRC/MCRR and friends).
 * Returns 0 if the instruction was handled (code was generated),
 * nonzero if it should UNDEF.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;
    /* XScale: coprocessor access is gated per-coprocessor by CPAR bits. */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Decode the register-identifying fields; 64-bit (MRRC/MCRR)
     * accesses have no crn/opc2 and carry a second transfer register.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look the register up in this CPU's hashtable of cp registers. */
    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_pl, ri, isread)) {
            return 1;
        }

        if (ri->accessfn) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             */
            TCGv_ptr tmpptr;
            gen_set_pc_im(s, s->pc);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* Wait-for-interrupt: end translation with the PC pointing
             * past this insn so execution resumes correctly on wakeup.
             */
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O-side-effecting registers need icount bracketing. */
        if (use_icount && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                /* Value comes from a constant, a helper, or a direct
                 * load from the CPUState field, in that priority order.
                 */
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high). */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Assemble the 64-bit value from rt (low) and rt2 (high). */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d\n",
                      isread ? "read" : "write", cpnum, opc1, crm);
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
    }

    return 1;
}
7007
5e3f878a
PB
7008
7009/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7010static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7011{
39d5492a 7012 TCGv_i32 tmp;
7d1b0095 7013 tmp = tcg_temp_new_i32();
5e3f878a
PB
7014 tcg_gen_trunc_i64_i32(tmp, val);
7015 store_reg(s, rlow, tmp);
7d1b0095 7016 tmp = tcg_temp_new_i32();
5e3f878a
PB
7017 tcg_gen_shri_i64(val, val, 32);
7018 tcg_gen_trunc_i64_i32(tmp, val);
7019 store_reg(s, rhigh, tmp);
7020}
7021
7022/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7023static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7024{
a7812ae4 7025 TCGv_i64 tmp;
39d5492a 7026 TCGv_i32 tmp2;
5e3f878a 7027
36aa55dc 7028 /* Load value and extend to 64 bits. */
a7812ae4 7029 tmp = tcg_temp_new_i64();
5e3f878a
PB
7030 tmp2 = load_reg(s, rlow);
7031 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7032 tcg_temp_free_i32(tmp2);
5e3f878a 7033 tcg_gen_add_i64(val, val, tmp);
b75263d6 7034 tcg_temp_free_i64(tmp);
5e3f878a
PB
7035}
7036
7037/* load and add a 64-bit value from a register pair. */
a7812ae4 7038static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7039{
a7812ae4 7040 TCGv_i64 tmp;
39d5492a
PM
7041 TCGv_i32 tmpl;
7042 TCGv_i32 tmph;
5e3f878a
PB
7043
7044 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7045 tmpl = load_reg(s, rlow);
7046 tmph = load_reg(s, rhigh);
a7812ae4 7047 tmp = tcg_temp_new_i64();
36aa55dc 7048 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7049 tcg_temp_free_i32(tmpl);
7050 tcg_temp_free_i32(tmph);
5e3f878a 7051 tcg_gen_add_i64(val, val, tmp);
b75263d6 7052 tcg_temp_free_i64(tmp);
5e3f878a
PB
7053}
7054
c9f10124 7055/* Set N and Z flags from hi|lo. */
39d5492a 7056static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7057{
c9f10124
RH
7058 tcg_gen_mov_i32(cpu_NF, hi);
7059 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7060}
7061
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
/* Emit code for LDREX{B,H,,D}: load of 2^size bytes (size==3 is the
 * doubleword form, loaded as two 32-bit words into rt/rt2) and arm the
 * exclusive monitor by recording the address and loaded value.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        /* For size 3 this is only the low word; the high word follows. */
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the high word from addr+4 and record the full 64-bit
         * value in the exclusive monitor before touching guest regs.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the monitor: remember the (extended) address of this access. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7107
/* Emit code for CLREX: clear the exclusive monitor by setting the
 * recorded address to -1, which can never match a real access address.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7112
#ifdef CONFIG_USER_ONLY
/* User-mode STREX: we cannot do the compare-and-store atomically here,
 * so record the operands (packed into cpu_exclusive_info) and raise
 * EXCP_STREX; the atomic operation is completed outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers into one word. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
/* System-mode STREX: emulate the exclusive monitor inline. Since only
 * one CPU runs at a time there is no real race; we succeed iff the
 * monitored address still matches and the memory still holds the value
 * seen by the matching LDREX. Rd receives 0 on success, 1 on failure. */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: is this the address the monitor was armed with? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load memory and compare with the value the
     * LDREX recorded in cpu_exclusive_val. */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        /* Doubleword: re-load the high word too and compare 64 bits. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: second register goes to addr + 4. */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);     /* success */
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);     /* failure */
    gen_set_label(done_label);
    /* Either way the monitor is now disarmed (-1 = no exclusive access). */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7207
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which mode's banked SP to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: stores LR and
 * SPSR of the current mode to the stack of the mode given by @mode,
 * optionally writing the adjusted SP back to that mode's banked R13.
 * (Caller has already checked for the privileged-only / ARCH(6)
 * requirements.)
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* addr = banked SP (R13) of the target mode */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset to the lower of the two words being stored, per amode. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR at [addr] and SPSR at [addr + 4]. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr (currently lower-word address + 4) to the final SP
         * value for this amode, then write it back to the banked R13. */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
7273
0ecb72a5 7274static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7275{
7276 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7277 TCGv_i32 tmp;
7278 TCGv_i32 tmp2;
7279 TCGv_i32 tmp3;
7280 TCGv_i32 addr;
a7812ae4 7281 TCGv_i64 tmp64;
9ee6e8bb 7282
d31dd73e 7283 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7284 s->pc += 4;
7285
7286 /* M variants do not implement ARM mode. */
7287 if (IS_M(env))
7288 goto illegal_op;
7289 cond = insn >> 28;
7290 if (cond == 0xf){
be5e7a76
DES
7291 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7292 * choose to UNDEF. In ARMv5 and above the space is used
7293 * for miscellaneous unconditional instructions.
7294 */
7295 ARCH(5);
7296
9ee6e8bb
PB
7297 /* Unconditional instructions. */
7298 if (((insn >> 25) & 7) == 1) {
7299 /* NEON Data processing. */
7300 if (!arm_feature(env, ARM_FEATURE_NEON))
7301 goto illegal_op;
7302
7303 if (disas_neon_data_insn(env, s, insn))
7304 goto illegal_op;
7305 return;
7306 }
7307 if ((insn & 0x0f100000) == 0x04000000) {
7308 /* NEON load/store. */
7309 if (!arm_feature(env, ARM_FEATURE_NEON))
7310 goto illegal_op;
7311
7312 if (disas_neon_ls_insn(env, s, insn))
7313 goto illegal_op;
7314 return;
7315 }
6a57f3eb
WN
7316 if ((insn & 0x0f000e10) == 0x0e000a00) {
7317 /* VFP. */
7318 if (disas_vfp_insn(env, s, insn)) {
7319 goto illegal_op;
7320 }
7321 return;
7322 }
3d185e5d
PM
7323 if (((insn & 0x0f30f000) == 0x0510f000) ||
7324 ((insn & 0x0f30f010) == 0x0710f000)) {
7325 if ((insn & (1 << 22)) == 0) {
7326 /* PLDW; v7MP */
7327 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7328 goto illegal_op;
7329 }
7330 }
7331 /* Otherwise PLD; v5TE+ */
be5e7a76 7332 ARCH(5TE);
3d185e5d
PM
7333 return;
7334 }
7335 if (((insn & 0x0f70f000) == 0x0450f000) ||
7336 ((insn & 0x0f70f010) == 0x0650f000)) {
7337 ARCH(7);
7338 return; /* PLI; V7 */
7339 }
7340 if (((insn & 0x0f700000) == 0x04100000) ||
7341 ((insn & 0x0f700010) == 0x06100000)) {
7342 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7343 goto illegal_op;
7344 }
7345 return; /* v7MP: Unallocated memory hint: must NOP */
7346 }
7347
7348 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7349 ARCH(6);
7350 /* setend */
10962fd5
PM
7351 if (((insn >> 9) & 1) != s->bswap_code) {
7352 /* Dynamic endianness switching not implemented. */
e0c270d9 7353 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7354 goto illegal_op;
7355 }
7356 return;
7357 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7358 switch ((insn >> 4) & 0xf) {
7359 case 1: /* clrex */
7360 ARCH(6K);
426f5abc 7361 gen_clrex(s);
9ee6e8bb
PB
7362 return;
7363 case 4: /* dsb */
7364 case 5: /* dmb */
7365 case 6: /* isb */
7366 ARCH(7);
7367 /* We don't emulate caches so these are a no-op. */
7368 return;
7369 default:
7370 goto illegal_op;
7371 }
7372 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7373 /* srs */
81465888 7374 if (IS_USER(s)) {
9ee6e8bb 7375 goto illegal_op;
9ee6e8bb 7376 }
81465888
PM
7377 ARCH(6);
7378 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7379 return;
ea825eee 7380 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7381 /* rfe */
c67b6b71 7382 int32_t offset;
9ee6e8bb
PB
7383 if (IS_USER(s))
7384 goto illegal_op;
7385 ARCH(6);
7386 rn = (insn >> 16) & 0xf;
b0109805 7387 addr = load_reg(s, rn);
9ee6e8bb
PB
7388 i = (insn >> 23) & 3;
7389 switch (i) {
b0109805 7390 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7391 case 1: offset = 0; break; /* IA */
7392 case 2: offset = -8; break; /* DB */
b0109805 7393 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7394 default: abort();
7395 }
7396 if (offset)
b0109805
PB
7397 tcg_gen_addi_i32(addr, addr, offset);
7398 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7399 tmp = tcg_temp_new_i32();
08307563 7400 gen_aa32_ld32u(tmp, addr, 0);
b0109805 7401 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7402 tmp2 = tcg_temp_new_i32();
08307563 7403 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
7404 if (insn & (1 << 21)) {
7405 /* Base writeback. */
7406 switch (i) {
b0109805 7407 case 0: offset = -8; break;
c67b6b71
FN
7408 case 1: offset = 4; break;
7409 case 2: offset = -4; break;
b0109805 7410 case 3: offset = 0; break;
9ee6e8bb
PB
7411 default: abort();
7412 }
7413 if (offset)
b0109805
PB
7414 tcg_gen_addi_i32(addr, addr, offset);
7415 store_reg(s, rn, addr);
7416 } else {
7d1b0095 7417 tcg_temp_free_i32(addr);
9ee6e8bb 7418 }
b0109805 7419 gen_rfe(s, tmp, tmp2);
c67b6b71 7420 return;
9ee6e8bb
PB
7421 } else if ((insn & 0x0e000000) == 0x0a000000) {
7422 /* branch link and change to thumb (blx <offset>) */
7423 int32_t offset;
7424
7425 val = (uint32_t)s->pc;
7d1b0095 7426 tmp = tcg_temp_new_i32();
d9ba4830
PB
7427 tcg_gen_movi_i32(tmp, val);
7428 store_reg(s, 14, tmp);
9ee6e8bb
PB
7429 /* Sign-extend the 24-bit offset */
7430 offset = (((int32_t)insn) << 8) >> 8;
7431 /* offset * 4 + bit24 * 2 + (thumb bit) */
7432 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7433 /* pipeline offset */
7434 val += 4;
be5e7a76 7435 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7436 gen_bx_im(s, val);
9ee6e8bb
PB
7437 return;
7438 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7439 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7440 /* iWMMXt register transfer. */
7441 if (env->cp15.c15_cpar & (1 << 1))
7442 if (!disas_iwmmxt_insn(env, s, insn))
7443 return;
7444 }
7445 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7446 /* Coprocessor double register transfer. */
be5e7a76 7447 ARCH(5TE);
9ee6e8bb
PB
7448 } else if ((insn & 0x0f000010) == 0x0e000010) {
7449 /* Additional coprocessor register transfer. */
7997d92f 7450 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7451 uint32_t mask;
7452 uint32_t val;
7453 /* cps (privileged) */
7454 if (IS_USER(s))
7455 return;
7456 mask = val = 0;
7457 if (insn & (1 << 19)) {
7458 if (insn & (1 << 8))
7459 mask |= CPSR_A;
7460 if (insn & (1 << 7))
7461 mask |= CPSR_I;
7462 if (insn & (1 << 6))
7463 mask |= CPSR_F;
7464 if (insn & (1 << 18))
7465 val |= mask;
7466 }
7997d92f 7467 if (insn & (1 << 17)) {
9ee6e8bb
PB
7468 mask |= CPSR_M;
7469 val |= (insn & 0x1f);
7470 }
7471 if (mask) {
2fbac54b 7472 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7473 }
7474 return;
7475 }
7476 goto illegal_op;
7477 }
7478 if (cond != 0xe) {
7479 /* if not always execute, we generate a conditional jump to
7480 next instruction */
7481 s->condlabel = gen_new_label();
39fb730a 7482 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7483 s->condjmp = 1;
7484 }
7485 if ((insn & 0x0f900000) == 0x03000000) {
7486 if ((insn & (1 << 21)) == 0) {
7487 ARCH(6T2);
7488 rd = (insn >> 12) & 0xf;
7489 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7490 if ((insn & (1 << 22)) == 0) {
7491 /* MOVW */
7d1b0095 7492 tmp = tcg_temp_new_i32();
5e3f878a 7493 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7494 } else {
7495 /* MOVT */
5e3f878a 7496 tmp = load_reg(s, rd);
86831435 7497 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7498 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7499 }
5e3f878a 7500 store_reg(s, rd, tmp);
9ee6e8bb
PB
7501 } else {
7502 if (((insn >> 12) & 0xf) != 0xf)
7503 goto illegal_op;
7504 if (((insn >> 16) & 0xf) == 0) {
7505 gen_nop_hint(s, insn & 0xff);
7506 } else {
7507 /* CPSR = immediate */
7508 val = insn & 0xff;
7509 shift = ((insn >> 8) & 0xf) * 2;
7510 if (shift)
7511 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7512 i = ((insn & (1 << 22)) != 0);
2fbac54b 7513 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7514 goto illegal_op;
7515 }
7516 }
7517 } else if ((insn & 0x0f900000) == 0x01000000
7518 && (insn & 0x00000090) != 0x00000090) {
7519 /* miscellaneous instructions */
7520 op1 = (insn >> 21) & 3;
7521 sh = (insn >> 4) & 0xf;
7522 rm = insn & 0xf;
7523 switch (sh) {
7524 case 0x0: /* move program status register */
7525 if (op1 & 1) {
7526 /* PSR = reg */
2fbac54b 7527 tmp = load_reg(s, rm);
9ee6e8bb 7528 i = ((op1 & 2) != 0);
2fbac54b 7529 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7530 goto illegal_op;
7531 } else {
7532 /* reg = PSR */
7533 rd = (insn >> 12) & 0xf;
7534 if (op1 & 2) {
7535 if (IS_USER(s))
7536 goto illegal_op;
d9ba4830 7537 tmp = load_cpu_field(spsr);
9ee6e8bb 7538 } else {
7d1b0095 7539 tmp = tcg_temp_new_i32();
9ef39277 7540 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7541 }
d9ba4830 7542 store_reg(s, rd, tmp);
9ee6e8bb
PB
7543 }
7544 break;
7545 case 0x1:
7546 if (op1 == 1) {
7547 /* branch/exchange thumb (bx). */
be5e7a76 7548 ARCH(4T);
d9ba4830
PB
7549 tmp = load_reg(s, rm);
7550 gen_bx(s, tmp);
9ee6e8bb
PB
7551 } else if (op1 == 3) {
7552 /* clz */
be5e7a76 7553 ARCH(5);
9ee6e8bb 7554 rd = (insn >> 12) & 0xf;
1497c961
PB
7555 tmp = load_reg(s, rm);
7556 gen_helper_clz(tmp, tmp);
7557 store_reg(s, rd, tmp);
9ee6e8bb
PB
7558 } else {
7559 goto illegal_op;
7560 }
7561 break;
7562 case 0x2:
7563 if (op1 == 1) {
7564 ARCH(5J); /* bxj */
7565 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7566 tmp = load_reg(s, rm);
7567 gen_bx(s, tmp);
9ee6e8bb
PB
7568 } else {
7569 goto illegal_op;
7570 }
7571 break;
7572 case 0x3:
7573 if (op1 != 1)
7574 goto illegal_op;
7575
be5e7a76 7576 ARCH(5);
9ee6e8bb 7577 /* branch link/exchange thumb (blx) */
d9ba4830 7578 tmp = load_reg(s, rm);
7d1b0095 7579 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7580 tcg_gen_movi_i32(tmp2, s->pc);
7581 store_reg(s, 14, tmp2);
7582 gen_bx(s, tmp);
9ee6e8bb 7583 break;
eb0ecd5a
WN
7584 case 0x4:
7585 {
7586 /* crc32/crc32c */
7587 uint32_t c = extract32(insn, 8, 4);
7588
7589 /* Check this CPU supports ARMv8 CRC instructions.
7590 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7591 * Bits 8, 10 and 11 should be zero.
7592 */
7593 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7594 (c & 0xd) != 0) {
7595 goto illegal_op;
7596 }
7597
7598 rn = extract32(insn, 16, 4);
7599 rd = extract32(insn, 12, 4);
7600
7601 tmp = load_reg(s, rn);
7602 tmp2 = load_reg(s, rm);
7603 tmp3 = tcg_const_i32(1 << op1);
7604 if (c & 0x2) {
7605 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7606 } else {
7607 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7608 }
7609 tcg_temp_free_i32(tmp2);
7610 tcg_temp_free_i32(tmp3);
7611 store_reg(s, rd, tmp);
7612 break;
7613 }
9ee6e8bb 7614 case 0x5: /* saturating add/subtract */
be5e7a76 7615 ARCH(5TE);
9ee6e8bb
PB
7616 rd = (insn >> 12) & 0xf;
7617 rn = (insn >> 16) & 0xf;
b40d0353 7618 tmp = load_reg(s, rm);
5e3f878a 7619 tmp2 = load_reg(s, rn);
9ee6e8bb 7620 if (op1 & 2)
9ef39277 7621 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7622 if (op1 & 1)
9ef39277 7623 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7624 else
9ef39277 7625 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7626 tcg_temp_free_i32(tmp2);
5e3f878a 7627 store_reg(s, rd, tmp);
9ee6e8bb 7628 break;
49e14940
AL
7629 case 7:
7630 /* SMC instruction (op1 == 3)
7631 and undefined instructions (op1 == 0 || op1 == 2)
7632 will trap */
7633 if (op1 != 1) {
7634 goto illegal_op;
7635 }
7636 /* bkpt */
be5e7a76 7637 ARCH(5);
bc4a0de0 7638 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7639 break;
7640 case 0x8: /* signed multiply */
7641 case 0xa:
7642 case 0xc:
7643 case 0xe:
be5e7a76 7644 ARCH(5TE);
9ee6e8bb
PB
7645 rs = (insn >> 8) & 0xf;
7646 rn = (insn >> 12) & 0xf;
7647 rd = (insn >> 16) & 0xf;
7648 if (op1 == 1) {
7649 /* (32 * 16) >> 16 */
5e3f878a
PB
7650 tmp = load_reg(s, rm);
7651 tmp2 = load_reg(s, rs);
9ee6e8bb 7652 if (sh & 4)
5e3f878a 7653 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7654 else
5e3f878a 7655 gen_sxth(tmp2);
a7812ae4
PB
7656 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7657 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7658 tmp = tcg_temp_new_i32();
a7812ae4 7659 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7660 tcg_temp_free_i64(tmp64);
9ee6e8bb 7661 if ((sh & 2) == 0) {
5e3f878a 7662 tmp2 = load_reg(s, rn);
9ef39277 7663 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7664 tcg_temp_free_i32(tmp2);
9ee6e8bb 7665 }
5e3f878a 7666 store_reg(s, rd, tmp);
9ee6e8bb
PB
7667 } else {
7668 /* 16 * 16 */
5e3f878a
PB
7669 tmp = load_reg(s, rm);
7670 tmp2 = load_reg(s, rs);
7671 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7672 tcg_temp_free_i32(tmp2);
9ee6e8bb 7673 if (op1 == 2) {
a7812ae4
PB
7674 tmp64 = tcg_temp_new_i64();
7675 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7676 tcg_temp_free_i32(tmp);
a7812ae4
PB
7677 gen_addq(s, tmp64, rn, rd);
7678 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7679 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7680 } else {
7681 if (op1 == 0) {
5e3f878a 7682 tmp2 = load_reg(s, rn);
9ef39277 7683 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7684 tcg_temp_free_i32(tmp2);
9ee6e8bb 7685 }
5e3f878a 7686 store_reg(s, rd, tmp);
9ee6e8bb
PB
7687 }
7688 }
7689 break;
7690 default:
7691 goto illegal_op;
7692 }
7693 } else if (((insn & 0x0e000000) == 0 &&
7694 (insn & 0x00000090) != 0x90) ||
7695 ((insn & 0x0e000000) == (1 << 25))) {
7696 int set_cc, logic_cc, shiftop;
7697
7698 op1 = (insn >> 21) & 0xf;
7699 set_cc = (insn >> 20) & 1;
7700 logic_cc = table_logic_cc[op1] & set_cc;
7701
7702 /* data processing instruction */
7703 if (insn & (1 << 25)) {
7704 /* immediate operand */
7705 val = insn & 0xff;
7706 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7707 if (shift) {
9ee6e8bb 7708 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7709 }
7d1b0095 7710 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7711 tcg_gen_movi_i32(tmp2, val);
7712 if (logic_cc && shift) {
7713 gen_set_CF_bit31(tmp2);
7714 }
9ee6e8bb
PB
7715 } else {
7716 /* register */
7717 rm = (insn) & 0xf;
e9bb4aa9 7718 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7719 shiftop = (insn >> 5) & 3;
7720 if (!(insn & (1 << 4))) {
7721 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7722 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7723 } else {
7724 rs = (insn >> 8) & 0xf;
8984bd2e 7725 tmp = load_reg(s, rs);
e9bb4aa9 7726 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7727 }
7728 }
7729 if (op1 != 0x0f && op1 != 0x0d) {
7730 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7731 tmp = load_reg(s, rn);
7732 } else {
39d5492a 7733 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7734 }
7735 rd = (insn >> 12) & 0xf;
7736 switch(op1) {
7737 case 0x00:
e9bb4aa9
JR
7738 tcg_gen_and_i32(tmp, tmp, tmp2);
7739 if (logic_cc) {
7740 gen_logic_CC(tmp);
7741 }
21aeb343 7742 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7743 break;
7744 case 0x01:
e9bb4aa9
JR
7745 tcg_gen_xor_i32(tmp, tmp, tmp2);
7746 if (logic_cc) {
7747 gen_logic_CC(tmp);
7748 }
21aeb343 7749 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7750 break;
7751 case 0x02:
7752 if (set_cc && rd == 15) {
7753 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7754 if (IS_USER(s)) {
9ee6e8bb 7755 goto illegal_op;
e9bb4aa9 7756 }
72485ec4 7757 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7758 gen_exception_return(s, tmp);
9ee6e8bb 7759 } else {
e9bb4aa9 7760 if (set_cc) {
72485ec4 7761 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7762 } else {
7763 tcg_gen_sub_i32(tmp, tmp, tmp2);
7764 }
21aeb343 7765 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7766 }
7767 break;
7768 case 0x03:
e9bb4aa9 7769 if (set_cc) {
72485ec4 7770 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7771 } else {
7772 tcg_gen_sub_i32(tmp, tmp2, tmp);
7773 }
21aeb343 7774 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7775 break;
7776 case 0x04:
e9bb4aa9 7777 if (set_cc) {
72485ec4 7778 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7779 } else {
7780 tcg_gen_add_i32(tmp, tmp, tmp2);
7781 }
21aeb343 7782 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7783 break;
7784 case 0x05:
e9bb4aa9 7785 if (set_cc) {
49b4c31e 7786 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7787 } else {
7788 gen_add_carry(tmp, tmp, tmp2);
7789 }
21aeb343 7790 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7791 break;
7792 case 0x06:
e9bb4aa9 7793 if (set_cc) {
2de68a49 7794 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7795 } else {
7796 gen_sub_carry(tmp, tmp, tmp2);
7797 }
21aeb343 7798 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7799 break;
7800 case 0x07:
e9bb4aa9 7801 if (set_cc) {
2de68a49 7802 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7803 } else {
7804 gen_sub_carry(tmp, tmp2, tmp);
7805 }
21aeb343 7806 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7807 break;
7808 case 0x08:
7809 if (set_cc) {
e9bb4aa9
JR
7810 tcg_gen_and_i32(tmp, tmp, tmp2);
7811 gen_logic_CC(tmp);
9ee6e8bb 7812 }
7d1b0095 7813 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7814 break;
7815 case 0x09:
7816 if (set_cc) {
e9bb4aa9
JR
7817 tcg_gen_xor_i32(tmp, tmp, tmp2);
7818 gen_logic_CC(tmp);
9ee6e8bb 7819 }
7d1b0095 7820 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7821 break;
7822 case 0x0a:
7823 if (set_cc) {
72485ec4 7824 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7825 }
7d1b0095 7826 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7827 break;
7828 case 0x0b:
7829 if (set_cc) {
72485ec4 7830 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7831 }
7d1b0095 7832 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7833 break;
7834 case 0x0c:
e9bb4aa9
JR
7835 tcg_gen_or_i32(tmp, tmp, tmp2);
7836 if (logic_cc) {
7837 gen_logic_CC(tmp);
7838 }
21aeb343 7839 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7840 break;
7841 case 0x0d:
7842 if (logic_cc && rd == 15) {
7843 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7844 if (IS_USER(s)) {
9ee6e8bb 7845 goto illegal_op;
e9bb4aa9
JR
7846 }
7847 gen_exception_return(s, tmp2);
9ee6e8bb 7848 } else {
e9bb4aa9
JR
7849 if (logic_cc) {
7850 gen_logic_CC(tmp2);
7851 }
21aeb343 7852 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7853 }
7854 break;
7855 case 0x0e:
f669df27 7856 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7857 if (logic_cc) {
7858 gen_logic_CC(tmp);
7859 }
21aeb343 7860 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7861 break;
7862 default:
7863 case 0x0f:
e9bb4aa9
JR
7864 tcg_gen_not_i32(tmp2, tmp2);
7865 if (logic_cc) {
7866 gen_logic_CC(tmp2);
7867 }
21aeb343 7868 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7869 break;
7870 }
e9bb4aa9 7871 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7872 tcg_temp_free_i32(tmp2);
e9bb4aa9 7873 }
9ee6e8bb
PB
7874 } else {
7875 /* other instructions */
7876 op1 = (insn >> 24) & 0xf;
7877 switch(op1) {
7878 case 0x0:
7879 case 0x1:
7880 /* multiplies, extra load/stores */
7881 sh = (insn >> 5) & 3;
7882 if (sh == 0) {
7883 if (op1 == 0x0) {
7884 rd = (insn >> 16) & 0xf;
7885 rn = (insn >> 12) & 0xf;
7886 rs = (insn >> 8) & 0xf;
7887 rm = (insn) & 0xf;
7888 op1 = (insn >> 20) & 0xf;
7889 switch (op1) {
7890 case 0: case 1: case 2: case 3: case 6:
7891 /* 32 bit mul */
5e3f878a
PB
7892 tmp = load_reg(s, rs);
7893 tmp2 = load_reg(s, rm);
7894 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7895 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7896 if (insn & (1 << 22)) {
7897 /* Subtract (mls) */
7898 ARCH(6T2);
5e3f878a
PB
7899 tmp2 = load_reg(s, rn);
7900 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7901 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7902 } else if (insn & (1 << 21)) {
7903 /* Add */
5e3f878a
PB
7904 tmp2 = load_reg(s, rn);
7905 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7906 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7907 }
7908 if (insn & (1 << 20))
5e3f878a
PB
7909 gen_logic_CC(tmp);
7910 store_reg(s, rd, tmp);
9ee6e8bb 7911 break;
8aac08b1
AJ
7912 case 4:
7913 /* 64 bit mul double accumulate (UMAAL) */
7914 ARCH(6);
7915 tmp = load_reg(s, rs);
7916 tmp2 = load_reg(s, rm);
7917 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7918 gen_addq_lo(s, tmp64, rn);
7919 gen_addq_lo(s, tmp64, rd);
7920 gen_storeq_reg(s, rn, rd, tmp64);
7921 tcg_temp_free_i64(tmp64);
7922 break;
7923 case 8: case 9: case 10: case 11:
7924 case 12: case 13: case 14: case 15:
7925 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7926 tmp = load_reg(s, rs);
7927 tmp2 = load_reg(s, rm);
8aac08b1 7928 if (insn & (1 << 22)) {
c9f10124 7929 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7930 } else {
c9f10124 7931 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7932 }
7933 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7934 TCGv_i32 al = load_reg(s, rn);
7935 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7936 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7937 tcg_temp_free_i32(al);
7938 tcg_temp_free_i32(ah);
9ee6e8bb 7939 }
8aac08b1 7940 if (insn & (1 << 20)) {
c9f10124 7941 gen_logicq_cc(tmp, tmp2);
8aac08b1 7942 }
c9f10124
RH
7943 store_reg(s, rn, tmp);
7944 store_reg(s, rd, tmp2);
9ee6e8bb 7945 break;
8aac08b1
AJ
7946 default:
7947 goto illegal_op;
9ee6e8bb
PB
7948 }
7949 } else {
7950 rn = (insn >> 16) & 0xf;
7951 rd = (insn >> 12) & 0xf;
7952 if (insn & (1 << 23)) {
7953 /* load/store exclusive */
2359bf80 7954 int op2 = (insn >> 8) & 3;
86753403 7955 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7956
7957 switch (op2) {
7958 case 0: /* lda/stl */
7959 if (op1 == 1) {
7960 goto illegal_op;
7961 }
7962 ARCH(8);
7963 break;
7964 case 1: /* reserved */
7965 goto illegal_op;
7966 case 2: /* ldaex/stlex */
7967 ARCH(8);
7968 break;
7969 case 3: /* ldrex/strex */
7970 if (op1) {
7971 ARCH(6K);
7972 } else {
7973 ARCH(6);
7974 }
7975 break;
7976 }
7977
3174f8e9 7978 addr = tcg_temp_local_new_i32();
98a46317 7979 load_reg_var(s, addr, rn);
2359bf80
MR
7980
7981 /* Since the emulation does not have barriers,
7982 the acquire/release semantics need no special
7983 handling */
7984 if (op2 == 0) {
7985 if (insn & (1 << 20)) {
7986 tmp = tcg_temp_new_i32();
7987 switch (op1) {
7988 case 0: /* lda */
08307563 7989 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7990 break;
7991 case 2: /* ldab */
08307563 7992 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7993 break;
7994 case 3: /* ldah */
08307563 7995 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7996 break;
7997 default:
7998 abort();
7999 }
8000 store_reg(s, rd, tmp);
8001 } else {
8002 rm = insn & 0xf;
8003 tmp = load_reg(s, rm);
8004 switch (op1) {
8005 case 0: /* stl */
08307563 8006 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8007 break;
8008 case 2: /* stlb */
08307563 8009 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8010 break;
8011 case 3: /* stlh */
08307563 8012 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8013 break;
8014 default:
8015 abort();
8016 }
8017 tcg_temp_free_i32(tmp);
8018 }
8019 } else if (insn & (1 << 20)) {
86753403
PB
8020 switch (op1) {
8021 case 0: /* ldrex */
426f5abc 8022 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8023 break;
8024 case 1: /* ldrexd */
426f5abc 8025 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8026 break;
8027 case 2: /* ldrexb */
426f5abc 8028 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8029 break;
8030 case 3: /* ldrexh */
426f5abc 8031 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8032 break;
8033 default:
8034 abort();
8035 }
9ee6e8bb
PB
8036 } else {
8037 rm = insn & 0xf;
86753403
PB
8038 switch (op1) {
8039 case 0: /* strex */
426f5abc 8040 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8041 break;
8042 case 1: /* strexd */
502e64fe 8043 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8044 break;
8045 case 2: /* strexb */
426f5abc 8046 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8047 break;
8048 case 3: /* strexh */
426f5abc 8049 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8050 break;
8051 default:
8052 abort();
8053 }
9ee6e8bb 8054 }
39d5492a 8055 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8056 } else {
8057 /* SWP instruction */
8058 rm = (insn) & 0xf;
8059
8984bd2e
PB
8060 /* ??? This is not really atomic. However we know
8061 we never have multiple CPUs running in parallel,
8062 so it is good enough. */
8063 addr = load_reg(s, rn);
8064 tmp = load_reg(s, rm);
5a839c0d 8065 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8066 if (insn & (1 << 22)) {
08307563
PM
8067 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
8068 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 8069 } else {
08307563
PM
8070 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
8071 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8072 }
5a839c0d 8073 tcg_temp_free_i32(tmp);
7d1b0095 8074 tcg_temp_free_i32(addr);
8984bd2e 8075 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8076 }
8077 }
8078 } else {
8079 int address_offset;
8080 int load;
8081 /* Misc load/store */
8082 rn = (insn >> 16) & 0xf;
8083 rd = (insn >> 12) & 0xf;
b0109805 8084 addr = load_reg(s, rn);
9ee6e8bb 8085 if (insn & (1 << 24))
b0109805 8086 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8087 address_offset = 0;
8088 if (insn & (1 << 20)) {
8089 /* load */
5a839c0d 8090 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8091 switch(sh) {
8092 case 1:
08307563 8093 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8094 break;
8095 case 2:
08307563 8096 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8097 break;
8098 default:
8099 case 3:
08307563 8100 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8101 break;
8102 }
8103 load = 1;
8104 } else if (sh & 2) {
be5e7a76 8105 ARCH(5TE);
9ee6e8bb
PB
8106 /* doubleword */
8107 if (sh & 1) {
8108 /* store */
b0109805 8109 tmp = load_reg(s, rd);
08307563 8110 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8111 tcg_temp_free_i32(tmp);
b0109805
PB
8112 tcg_gen_addi_i32(addr, addr, 4);
8113 tmp = load_reg(s, rd + 1);
08307563 8114 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8115 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8116 load = 0;
8117 } else {
8118 /* load */
5a839c0d 8119 tmp = tcg_temp_new_i32();
08307563 8120 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8121 store_reg(s, rd, tmp);
8122 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8123 tmp = tcg_temp_new_i32();
08307563 8124 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8125 rd++;
8126 load = 1;
8127 }
8128 address_offset = -4;
8129 } else {
8130 /* store */
b0109805 8131 tmp = load_reg(s, rd);
08307563 8132 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 8133 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8134 load = 0;
8135 }
8136 /* Perform base writeback before the loaded value to
8137 ensure correct behavior with overlapping index registers.
8138 ldrd with base writeback is is undefined if the
8139 destination and index registers overlap. */
8140 if (!(insn & (1 << 24))) {
b0109805
PB
8141 gen_add_datah_offset(s, insn, address_offset, addr);
8142 store_reg(s, rn, addr);
9ee6e8bb
PB
8143 } else if (insn & (1 << 21)) {
8144 if (address_offset)
b0109805
PB
8145 tcg_gen_addi_i32(addr, addr, address_offset);
8146 store_reg(s, rn, addr);
8147 } else {
7d1b0095 8148 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8149 }
8150 if (load) {
8151 /* Complete the load. */
b0109805 8152 store_reg(s, rd, tmp);
9ee6e8bb
PB
8153 }
8154 }
8155 break;
8156 case 0x4:
8157 case 0x5:
8158 goto do_ldst;
8159 case 0x6:
8160 case 0x7:
8161 if (insn & (1 << 4)) {
8162 ARCH(6);
8163 /* Armv6 Media instructions. */
8164 rm = insn & 0xf;
8165 rn = (insn >> 16) & 0xf;
2c0262af 8166 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8167 rs = (insn >> 8) & 0xf;
8168 switch ((insn >> 23) & 3) {
8169 case 0: /* Parallel add/subtract. */
8170 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8171 tmp = load_reg(s, rn);
8172 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8173 sh = (insn >> 5) & 7;
8174 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8175 goto illegal_op;
6ddbc6e4 8176 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8177 tcg_temp_free_i32(tmp2);
6ddbc6e4 8178 store_reg(s, rd, tmp);
9ee6e8bb
PB
8179 break;
8180 case 1:
8181 if ((insn & 0x00700020) == 0) {
6c95676b 8182 /* Halfword pack. */
3670669c
PB
8183 tmp = load_reg(s, rn);
8184 tmp2 = load_reg(s, rm);
9ee6e8bb 8185 shift = (insn >> 7) & 0x1f;
3670669c
PB
8186 if (insn & (1 << 6)) {
8187 /* pkhtb */
22478e79
AZ
8188 if (shift == 0)
8189 shift = 31;
8190 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8191 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8192 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8193 } else {
8194 /* pkhbt */
22478e79
AZ
8195 if (shift)
8196 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8197 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8198 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8199 }
8200 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8201 tcg_temp_free_i32(tmp2);
3670669c 8202 store_reg(s, rd, tmp);
9ee6e8bb
PB
8203 } else if ((insn & 0x00200020) == 0x00200000) {
8204 /* [us]sat */
6ddbc6e4 8205 tmp = load_reg(s, rm);
9ee6e8bb
PB
8206 shift = (insn >> 7) & 0x1f;
8207 if (insn & (1 << 6)) {
8208 if (shift == 0)
8209 shift = 31;
6ddbc6e4 8210 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8211 } else {
6ddbc6e4 8212 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8213 }
8214 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8215 tmp2 = tcg_const_i32(sh);
8216 if (insn & (1 << 22))
9ef39277 8217 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8218 else
9ef39277 8219 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8220 tcg_temp_free_i32(tmp2);
6ddbc6e4 8221 store_reg(s, rd, tmp);
9ee6e8bb
PB
8222 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8223 /* [us]sat16 */
6ddbc6e4 8224 tmp = load_reg(s, rm);
9ee6e8bb 8225 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8226 tmp2 = tcg_const_i32(sh);
8227 if (insn & (1 << 22))
9ef39277 8228 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8229 else
9ef39277 8230 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8231 tcg_temp_free_i32(tmp2);
6ddbc6e4 8232 store_reg(s, rd, tmp);
9ee6e8bb
PB
8233 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8234 /* Select bytes. */
6ddbc6e4
PB
8235 tmp = load_reg(s, rn);
8236 tmp2 = load_reg(s, rm);
7d1b0095 8237 tmp3 = tcg_temp_new_i32();
0ecb72a5 8238 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8239 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8240 tcg_temp_free_i32(tmp3);
8241 tcg_temp_free_i32(tmp2);
6ddbc6e4 8242 store_reg(s, rd, tmp);
9ee6e8bb 8243 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8244 tmp = load_reg(s, rm);
9ee6e8bb 8245 shift = (insn >> 10) & 3;
1301f322 8246 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8247 rotate, a shift is sufficient. */
8248 if (shift != 0)
f669df27 8249 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8250 op1 = (insn >> 20) & 7;
8251 switch (op1) {
5e3f878a
PB
8252 case 0: gen_sxtb16(tmp); break;
8253 case 2: gen_sxtb(tmp); break;
8254 case 3: gen_sxth(tmp); break;
8255 case 4: gen_uxtb16(tmp); break;
8256 case 6: gen_uxtb(tmp); break;
8257 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8258 default: goto illegal_op;
8259 }
8260 if (rn != 15) {
5e3f878a 8261 tmp2 = load_reg(s, rn);
9ee6e8bb 8262 if ((op1 & 3) == 0) {
5e3f878a 8263 gen_add16(tmp, tmp2);
9ee6e8bb 8264 } else {
5e3f878a 8265 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8266 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8267 }
8268 }
6c95676b 8269 store_reg(s, rd, tmp);
9ee6e8bb
PB
8270 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8271 /* rev */
b0109805 8272 tmp = load_reg(s, rm);
9ee6e8bb
PB
8273 if (insn & (1 << 22)) {
8274 if (insn & (1 << 7)) {
b0109805 8275 gen_revsh(tmp);
9ee6e8bb
PB
8276 } else {
8277 ARCH(6T2);
b0109805 8278 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8279 }
8280 } else {
8281 if (insn & (1 << 7))
b0109805 8282 gen_rev16(tmp);
9ee6e8bb 8283 else
66896cb8 8284 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8285 }
b0109805 8286 store_reg(s, rd, tmp);
9ee6e8bb
PB
8287 } else {
8288 goto illegal_op;
8289 }
8290 break;
8291 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8292 switch ((insn >> 20) & 0x7) {
8293 case 5:
8294 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8295 /* op2 not 00x or 11x : UNDEF */
8296 goto illegal_op;
8297 }
838fa72d
AJ
8298 /* Signed multiply most significant [accumulate].
8299 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8300 tmp = load_reg(s, rm);
8301 tmp2 = load_reg(s, rs);
a7812ae4 8302 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8303
955a7dd5 8304 if (rd != 15) {
838fa72d 8305 tmp = load_reg(s, rd);
9ee6e8bb 8306 if (insn & (1 << 6)) {
838fa72d 8307 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8308 } else {
838fa72d 8309 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8310 }
8311 }
838fa72d
AJ
8312 if (insn & (1 << 5)) {
8313 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8314 }
8315 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8316 tmp = tcg_temp_new_i32();
838fa72d
AJ
8317 tcg_gen_trunc_i64_i32(tmp, tmp64);
8318 tcg_temp_free_i64(tmp64);
955a7dd5 8319 store_reg(s, rn, tmp);
41e9564d
PM
8320 break;
8321 case 0:
8322 case 4:
8323 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8324 if (insn & (1 << 7)) {
8325 goto illegal_op;
8326 }
8327 tmp = load_reg(s, rm);
8328 tmp2 = load_reg(s, rs);
9ee6e8bb 8329 if (insn & (1 << 5))
5e3f878a
PB
8330 gen_swap_half(tmp2);
8331 gen_smul_dual(tmp, tmp2);
5e3f878a 8332 if (insn & (1 << 6)) {
e1d177b9 8333 /* This subtraction cannot overflow. */
5e3f878a
PB
8334 tcg_gen_sub_i32(tmp, tmp, tmp2);
8335 } else {
e1d177b9
PM
8336 /* This addition cannot overflow 32 bits;
8337 * however it may overflow considered as a signed
8338 * operation, in which case we must set the Q flag.
8339 */
9ef39277 8340 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 8341 }
7d1b0095 8342 tcg_temp_free_i32(tmp2);
9ee6e8bb 8343 if (insn & (1 << 22)) {
5e3f878a 8344 /* smlald, smlsld */
a7812ae4
PB
8345 tmp64 = tcg_temp_new_i64();
8346 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8347 tcg_temp_free_i32(tmp);
a7812ae4
PB
8348 gen_addq(s, tmp64, rd, rn);
8349 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8350 tcg_temp_free_i64(tmp64);
9ee6e8bb 8351 } else {
5e3f878a 8352 /* smuad, smusd, smlad, smlsd */
22478e79 8353 if (rd != 15)
9ee6e8bb 8354 {
22478e79 8355 tmp2 = load_reg(s, rd);
9ef39277 8356 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8357 tcg_temp_free_i32(tmp2);
9ee6e8bb 8358 }
22478e79 8359 store_reg(s, rn, tmp);
9ee6e8bb 8360 }
41e9564d 8361 break;
b8b8ea05
PM
8362 case 1:
8363 case 3:
8364 /* SDIV, UDIV */
8365 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8366 goto illegal_op;
8367 }
8368 if (((insn >> 5) & 7) || (rd != 15)) {
8369 goto illegal_op;
8370 }
8371 tmp = load_reg(s, rm);
8372 tmp2 = load_reg(s, rs);
8373 if (insn & (1 << 21)) {
8374 gen_helper_udiv(tmp, tmp, tmp2);
8375 } else {
8376 gen_helper_sdiv(tmp, tmp, tmp2);
8377 }
8378 tcg_temp_free_i32(tmp2);
8379 store_reg(s, rn, tmp);
8380 break;
41e9564d
PM
8381 default:
8382 goto illegal_op;
9ee6e8bb
PB
8383 }
8384 break;
8385 case 3:
8386 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8387 switch (op1) {
8388 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8389 ARCH(6);
8390 tmp = load_reg(s, rm);
8391 tmp2 = load_reg(s, rs);
8392 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8393 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8394 if (rd != 15) {
8395 tmp2 = load_reg(s, rd);
6ddbc6e4 8396 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8397 tcg_temp_free_i32(tmp2);
9ee6e8bb 8398 }
ded9d295 8399 store_reg(s, rn, tmp);
9ee6e8bb
PB
8400 break;
8401 case 0x20: case 0x24: case 0x28: case 0x2c:
8402 /* Bitfield insert/clear. */
8403 ARCH(6T2);
8404 shift = (insn >> 7) & 0x1f;
8405 i = (insn >> 16) & 0x1f;
8406 i = i + 1 - shift;
8407 if (rm == 15) {
7d1b0095 8408 tmp = tcg_temp_new_i32();
5e3f878a 8409 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8410 } else {
5e3f878a 8411 tmp = load_reg(s, rm);
9ee6e8bb
PB
8412 }
8413 if (i != 32) {
5e3f878a 8414 tmp2 = load_reg(s, rd);
d593c48e 8415 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8416 tcg_temp_free_i32(tmp2);
9ee6e8bb 8417 }
5e3f878a 8418 store_reg(s, rd, tmp);
9ee6e8bb
PB
8419 break;
8420 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8421 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8422 ARCH(6T2);
5e3f878a 8423 tmp = load_reg(s, rm);
9ee6e8bb
PB
8424 shift = (insn >> 7) & 0x1f;
8425 i = ((insn >> 16) & 0x1f) + 1;
8426 if (shift + i > 32)
8427 goto illegal_op;
8428 if (i < 32) {
8429 if (op1 & 0x20) {
5e3f878a 8430 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8431 } else {
5e3f878a 8432 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8433 }
8434 }
5e3f878a 8435 store_reg(s, rd, tmp);
9ee6e8bb
PB
8436 break;
8437 default:
8438 goto illegal_op;
8439 }
8440 break;
8441 }
8442 break;
8443 }
8444 do_ldst:
8445 /* Check for undefined extension instructions
8446 * per the ARM Bible IE:
8447 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8448 */
8449 sh = (0xf << 20) | (0xf << 4);
8450 if (op1 == 0x7 && ((insn & sh) == sh))
8451 {
8452 goto illegal_op;
8453 }
8454 /* load/store byte/word */
8455 rn = (insn >> 16) & 0xf;
8456 rd = (insn >> 12) & 0xf;
b0109805 8457 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
8458 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
8459 if (insn & (1 << 24))
b0109805 8460 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8461 if (insn & (1 << 20)) {
8462 /* load */
5a839c0d 8463 tmp = tcg_temp_new_i32();
9ee6e8bb 8464 if (insn & (1 << 22)) {
08307563 8465 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8466 } else {
08307563 8467 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8468 }
9ee6e8bb
PB
8469 } else {
8470 /* store */
b0109805 8471 tmp = load_reg(s, rd);
5a839c0d 8472 if (insn & (1 << 22)) {
08307563 8473 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8474 } else {
08307563 8475 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8476 }
8477 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8478 }
8479 if (!(insn & (1 << 24))) {
b0109805
PB
8480 gen_add_data_offset(s, insn, tmp2);
8481 store_reg(s, rn, tmp2);
8482 } else if (insn & (1 << 21)) {
8483 store_reg(s, rn, tmp2);
8484 } else {
7d1b0095 8485 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8486 }
8487 if (insn & (1 << 20)) {
8488 /* Complete the load. */
be5e7a76 8489 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8490 }
8491 break;
8492 case 0x08:
8493 case 0x09:
8494 {
8495 int j, n, user, loaded_base;
39d5492a 8496 TCGv_i32 loaded_var;
9ee6e8bb
PB
8497 /* load/store multiple words */
8498 /* XXX: store correct base if write back */
8499 user = 0;
8500 if (insn & (1 << 22)) {
8501 if (IS_USER(s))
8502 goto illegal_op; /* only usable in supervisor mode */
8503
8504 if ((insn & (1 << 15)) == 0)
8505 user = 1;
8506 }
8507 rn = (insn >> 16) & 0xf;
b0109805 8508 addr = load_reg(s, rn);
9ee6e8bb
PB
8509
8510 /* compute total size */
8511 loaded_base = 0;
39d5492a 8512 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8513 n = 0;
8514 for(i=0;i<16;i++) {
8515 if (insn & (1 << i))
8516 n++;
8517 }
8518 /* XXX: test invalid n == 0 case ? */
8519 if (insn & (1 << 23)) {
8520 if (insn & (1 << 24)) {
8521 /* pre increment */
b0109805 8522 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8523 } else {
8524 /* post increment */
8525 }
8526 } else {
8527 if (insn & (1 << 24)) {
8528 /* pre decrement */
b0109805 8529 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8530 } else {
8531 /* post decrement */
8532 if (n != 1)
b0109805 8533 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8534 }
8535 }
8536 j = 0;
8537 for(i=0;i<16;i++) {
8538 if (insn & (1 << i)) {
8539 if (insn & (1 << 20)) {
8540 /* load */
5a839c0d 8541 tmp = tcg_temp_new_i32();
08307563 8542 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 8543 if (user) {
b75263d6 8544 tmp2 = tcg_const_i32(i);
1ce94f81 8545 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8546 tcg_temp_free_i32(tmp2);
7d1b0095 8547 tcg_temp_free_i32(tmp);
9ee6e8bb 8548 } else if (i == rn) {
b0109805 8549 loaded_var = tmp;
9ee6e8bb
PB
8550 loaded_base = 1;
8551 } else {
be5e7a76 8552 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8553 }
8554 } else {
8555 /* store */
8556 if (i == 15) {
8557 /* special case: r15 = PC + 8 */
8558 val = (long)s->pc + 4;
7d1b0095 8559 tmp = tcg_temp_new_i32();
b0109805 8560 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8561 } else if (user) {
7d1b0095 8562 tmp = tcg_temp_new_i32();
b75263d6 8563 tmp2 = tcg_const_i32(i);
9ef39277 8564 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8565 tcg_temp_free_i32(tmp2);
9ee6e8bb 8566 } else {
b0109805 8567 tmp = load_reg(s, i);
9ee6e8bb 8568 }
08307563 8569 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 8570 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8571 }
8572 j++;
8573 /* no need to add after the last transfer */
8574 if (j != n)
b0109805 8575 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8576 }
8577 }
8578 if (insn & (1 << 21)) {
8579 /* write back */
8580 if (insn & (1 << 23)) {
8581 if (insn & (1 << 24)) {
8582 /* pre increment */
8583 } else {
8584 /* post increment */
b0109805 8585 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8586 }
8587 } else {
8588 if (insn & (1 << 24)) {
8589 /* pre decrement */
8590 if (n != 1)
b0109805 8591 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8592 } else {
8593 /* post decrement */
b0109805 8594 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8595 }
8596 }
b0109805
PB
8597 store_reg(s, rn, addr);
8598 } else {
7d1b0095 8599 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8600 }
8601 if (loaded_base) {
b0109805 8602 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8603 }
8604 if ((insn & (1 << 22)) && !user) {
8605 /* Restore CPSR from SPSR. */
d9ba4830
PB
8606 tmp = load_cpu_field(spsr);
8607 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8608 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8609 s->is_jmp = DISAS_UPDATE;
8610 }
8611 }
8612 break;
8613 case 0xa:
8614 case 0xb:
8615 {
8616 int32_t offset;
8617
8618 /* branch (and link) */
8619 val = (int32_t)s->pc;
8620 if (insn & (1 << 24)) {
7d1b0095 8621 tmp = tcg_temp_new_i32();
5e3f878a
PB
8622 tcg_gen_movi_i32(tmp, val);
8623 store_reg(s, 14, tmp);
9ee6e8bb 8624 }
534df156
PM
8625 offset = sextract32(insn << 2, 0, 26);
8626 val += offset + 4;
9ee6e8bb
PB
8627 gen_jmp(s, val);
8628 }
8629 break;
8630 case 0xc:
8631 case 0xd:
8632 case 0xe:
6a57f3eb
WN
8633 if (((insn >> 8) & 0xe) == 10) {
8634 /* VFP. */
8635 if (disas_vfp_insn(env, s, insn)) {
8636 goto illegal_op;
8637 }
8638 } else if (disas_coproc_insn(env, s, insn)) {
8639 /* Coprocessor. */
9ee6e8bb 8640 goto illegal_op;
6a57f3eb 8641 }
9ee6e8bb
PB
8642 break;
8643 case 0xf:
8644 /* swi */
eaed129d 8645 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8646 s->is_jmp = DISAS_SWI;
8647 break;
8648 default:
8649 illegal_op:
bc4a0de0 8650 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8651 break;
8652 }
8653 }
8654}
8655
/* Return true (nonzero) if OP is in the Thumb-2 logical-op group.
 * Opcodes 0..7 form the logical group (and, bic, orr, orn, eor, ...);
 * opcodes 8 and above are the arithmetic group. */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
8662
8663/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8664 then set condition code flags based on the result of the operation.
8665 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8666 to the high bit of T1.
8667 Returns zero if the opcode is valid. */
8668
8669static int
39d5492a
PM
8670gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8671 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8672{
8673 int logic_cc;
8674
8675 logic_cc = 0;
8676 switch (op) {
8677 case 0: /* and */
396e467c 8678 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8679 logic_cc = conds;
8680 break;
8681 case 1: /* bic */
f669df27 8682 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8683 logic_cc = conds;
8684 break;
8685 case 2: /* orr */
396e467c 8686 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8687 logic_cc = conds;
8688 break;
8689 case 3: /* orn */
29501f1b 8690 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8691 logic_cc = conds;
8692 break;
8693 case 4: /* eor */
396e467c 8694 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8695 logic_cc = conds;
8696 break;
8697 case 8: /* add */
8698 if (conds)
72485ec4 8699 gen_add_CC(t0, t0, t1);
9ee6e8bb 8700 else
396e467c 8701 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8702 break;
8703 case 10: /* adc */
8704 if (conds)
49b4c31e 8705 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8706 else
396e467c 8707 gen_adc(t0, t1);
9ee6e8bb
PB
8708 break;
8709 case 11: /* sbc */
2de68a49
RH
8710 if (conds) {
8711 gen_sbc_CC(t0, t0, t1);
8712 } else {
396e467c 8713 gen_sub_carry(t0, t0, t1);
2de68a49 8714 }
9ee6e8bb
PB
8715 break;
8716 case 13: /* sub */
8717 if (conds)
72485ec4 8718 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8719 else
396e467c 8720 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8721 break;
8722 case 14: /* rsb */
8723 if (conds)
72485ec4 8724 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8725 else
396e467c 8726 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8727 break;
8728 default: /* 5, 6, 7, 9, 12, 15. */
8729 return 1;
8730 }
8731 if (logic_cc) {
396e467c 8732 gen_logic_CC(t0);
9ee6e8bb 8733 if (shifter_out)
396e467c 8734 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8735 }
8736 return 0;
8737}
8738
8739/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8740 is not legal. */
0ecb72a5 8741static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8742{
b0109805 8743 uint32_t insn, imm, shift, offset;
9ee6e8bb 8744 uint32_t rd, rn, rm, rs;
39d5492a
PM
8745 TCGv_i32 tmp;
8746 TCGv_i32 tmp2;
8747 TCGv_i32 tmp3;
8748 TCGv_i32 addr;
a7812ae4 8749 TCGv_i64 tmp64;
9ee6e8bb
PB
8750 int op;
8751 int shiftop;
8752 int conds;
8753 int logic_cc;
8754
8755 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8756 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8757 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8758 16-bit instructions to get correct prefetch abort behavior. */
8759 insn = insn_hw1;
8760 if ((insn & (1 << 12)) == 0) {
be5e7a76 8761 ARCH(5);
9ee6e8bb
PB
8762 /* Second half of blx. */
8763 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8764 tmp = load_reg(s, 14);
8765 tcg_gen_addi_i32(tmp, tmp, offset);
8766 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8767
7d1b0095 8768 tmp2 = tcg_temp_new_i32();
b0109805 8769 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8770 store_reg(s, 14, tmp2);
8771 gen_bx(s, tmp);
9ee6e8bb
PB
8772 return 0;
8773 }
8774 if (insn & (1 << 11)) {
8775 /* Second half of bl. */
8776 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8777 tmp = load_reg(s, 14);
6a0d8a1d 8778 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8779
7d1b0095 8780 tmp2 = tcg_temp_new_i32();
b0109805 8781 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8782 store_reg(s, 14, tmp2);
8783 gen_bx(s, tmp);
9ee6e8bb
PB
8784 return 0;
8785 }
8786 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8787 /* Instruction spans a page boundary. Implement it as two
 8788 16-bit instructions in case the second half causes a
8789 prefetch abort. */
8790 offset = ((int32_t)insn << 21) >> 9;
396e467c 8791 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8792 return 0;
8793 }
8794 /* Fall through to 32-bit decode. */
8795 }
8796
d31dd73e 8797 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8798 s->pc += 2;
8799 insn |= (uint32_t)insn_hw1 << 16;
8800
8801 if ((insn & 0xf800e800) != 0xf000e800) {
8802 ARCH(6T2);
8803 }
8804
8805 rn = (insn >> 16) & 0xf;
8806 rs = (insn >> 12) & 0xf;
8807 rd = (insn >> 8) & 0xf;
8808 rm = insn & 0xf;
8809 switch ((insn >> 25) & 0xf) {
8810 case 0: case 1: case 2: case 3:
8811 /* 16-bit instructions. Should never happen. */
8812 abort();
8813 case 4:
8814 if (insn & (1 << 22)) {
8815 /* Other load/store, table branch. */
8816 if (insn & 0x01200000) {
8817 /* Load/store doubleword. */
8818 if (rn == 15) {
7d1b0095 8819 addr = tcg_temp_new_i32();
b0109805 8820 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8821 } else {
b0109805 8822 addr = load_reg(s, rn);
9ee6e8bb
PB
8823 }
8824 offset = (insn & 0xff) * 4;
8825 if ((insn & (1 << 23)) == 0)
8826 offset = -offset;
8827 if (insn & (1 << 24)) {
b0109805 8828 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8829 offset = 0;
8830 }
8831 if (insn & (1 << 20)) {
8832 /* ldrd */
e2592fad 8833 tmp = tcg_temp_new_i32();
08307563 8834 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8835 store_reg(s, rs, tmp);
8836 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8837 tmp = tcg_temp_new_i32();
08307563 8838 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8839 store_reg(s, rd, tmp);
9ee6e8bb
PB
8840 } else {
8841 /* strd */
b0109805 8842 tmp = load_reg(s, rs);
08307563 8843 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8844 tcg_temp_free_i32(tmp);
b0109805
PB
8845 tcg_gen_addi_i32(addr, addr, 4);
8846 tmp = load_reg(s, rd);
08307563 8847 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8848 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8849 }
8850 if (insn & (1 << 21)) {
8851 /* Base writeback. */
8852 if (rn == 15)
8853 goto illegal_op;
b0109805
PB
8854 tcg_gen_addi_i32(addr, addr, offset - 4);
8855 store_reg(s, rn, addr);
8856 } else {
7d1b0095 8857 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8858 }
8859 } else if ((insn & (1 << 23)) == 0) {
8860 /* Load/store exclusive word. */
39d5492a 8861 addr = tcg_temp_local_new_i32();
98a46317 8862 load_reg_var(s, addr, rn);
426f5abc 8863 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8864 if (insn & (1 << 20)) {
426f5abc 8865 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8866 } else {
426f5abc 8867 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8868 }
39d5492a 8869 tcg_temp_free_i32(addr);
2359bf80 8870 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8871 /* Table Branch. */
8872 if (rn == 15) {
7d1b0095 8873 addr = tcg_temp_new_i32();
b0109805 8874 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8875 } else {
b0109805 8876 addr = load_reg(s, rn);
9ee6e8bb 8877 }
b26eefb6 8878 tmp = load_reg(s, rm);
b0109805 8879 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8880 if (insn & (1 << 4)) {
8881 /* tbh */
b0109805 8882 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8883 tcg_temp_free_i32(tmp);
e2592fad 8884 tmp = tcg_temp_new_i32();
08307563 8885 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8886 } else { /* tbb */
7d1b0095 8887 tcg_temp_free_i32(tmp);
e2592fad 8888 tmp = tcg_temp_new_i32();
08307563 8889 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8890 }
7d1b0095 8891 tcg_temp_free_i32(addr);
b0109805
PB
8892 tcg_gen_shli_i32(tmp, tmp, 1);
8893 tcg_gen_addi_i32(tmp, tmp, s->pc);
8894 store_reg(s, 15, tmp);
9ee6e8bb 8895 } else {
2359bf80 8896 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8897 op = (insn >> 4) & 0x3;
2359bf80
MR
8898 switch (op2) {
8899 case 0:
426f5abc 8900 goto illegal_op;
2359bf80
MR
8901 case 1:
8902 /* Load/store exclusive byte/halfword/doubleword */
8903 if (op == 2) {
8904 goto illegal_op;
8905 }
8906 ARCH(7);
8907 break;
8908 case 2:
8909 /* Load-acquire/store-release */
8910 if (op == 3) {
8911 goto illegal_op;
8912 }
8913 /* Fall through */
8914 case 3:
8915 /* Load-acquire/store-release exclusive */
8916 ARCH(8);
8917 break;
426f5abc 8918 }
39d5492a 8919 addr = tcg_temp_local_new_i32();
98a46317 8920 load_reg_var(s, addr, rn);
2359bf80
MR
8921 if (!(op2 & 1)) {
8922 if (insn & (1 << 20)) {
8923 tmp = tcg_temp_new_i32();
8924 switch (op) {
8925 case 0: /* ldab */
08307563 8926 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8927 break;
8928 case 1: /* ldah */
08307563 8929 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8930 break;
8931 case 2: /* lda */
08307563 8932 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8933 break;
8934 default:
8935 abort();
8936 }
8937 store_reg(s, rs, tmp);
8938 } else {
8939 tmp = load_reg(s, rs);
8940 switch (op) {
8941 case 0: /* stlb */
08307563 8942 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8943 break;
8944 case 1: /* stlh */
08307563 8945 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8946 break;
8947 case 2: /* stl */
08307563 8948 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8949 break;
8950 default:
8951 abort();
8952 }
8953 tcg_temp_free_i32(tmp);
8954 }
8955 } else if (insn & (1 << 20)) {
426f5abc 8956 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8957 } else {
426f5abc 8958 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8959 }
39d5492a 8960 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8961 }
8962 } else {
8963 /* Load/store multiple, RFE, SRS. */
8964 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8965 /* RFE, SRS: not available in user mode or on M profile */
8966 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8967 goto illegal_op;
00115976 8968 }
9ee6e8bb
PB
8969 if (insn & (1 << 20)) {
8970 /* rfe */
b0109805
PB
8971 addr = load_reg(s, rn);
8972 if ((insn & (1 << 24)) == 0)
8973 tcg_gen_addi_i32(addr, addr, -8);
8974 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8975 tmp = tcg_temp_new_i32();
08307563 8976 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8977 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8978 tmp2 = tcg_temp_new_i32();
08307563 8979 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8980 if (insn & (1 << 21)) {
8981 /* Base writeback. */
b0109805
PB
8982 if (insn & (1 << 24)) {
8983 tcg_gen_addi_i32(addr, addr, 4);
8984 } else {
8985 tcg_gen_addi_i32(addr, addr, -4);
8986 }
8987 store_reg(s, rn, addr);
8988 } else {
7d1b0095 8989 tcg_temp_free_i32(addr);
9ee6e8bb 8990 }
b0109805 8991 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8992 } else {
8993 /* srs */
81465888
PM
8994 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8995 insn & (1 << 21));
9ee6e8bb
PB
8996 }
8997 } else {
5856d44e 8998 int i, loaded_base = 0;
39d5492a 8999 TCGv_i32 loaded_var;
9ee6e8bb 9000 /* Load/store multiple. */
b0109805 9001 addr = load_reg(s, rn);
9ee6e8bb
PB
9002 offset = 0;
9003 for (i = 0; i < 16; i++) {
9004 if (insn & (1 << i))
9005 offset += 4;
9006 }
9007 if (insn & (1 << 24)) {
b0109805 9008 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9009 }
9010
39d5492a 9011 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9012 for (i = 0; i < 16; i++) {
9013 if ((insn & (1 << i)) == 0)
9014 continue;
9015 if (insn & (1 << 20)) {
9016 /* Load. */
e2592fad 9017 tmp = tcg_temp_new_i32();
08307563 9018 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 9019 if (i == 15) {
b0109805 9020 gen_bx(s, tmp);
5856d44e
YO
9021 } else if (i == rn) {
9022 loaded_var = tmp;
9023 loaded_base = 1;
9ee6e8bb 9024 } else {
b0109805 9025 store_reg(s, i, tmp);
9ee6e8bb
PB
9026 }
9027 } else {
9028 /* Store. */
b0109805 9029 tmp = load_reg(s, i);
08307563 9030 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 9031 tcg_temp_free_i32(tmp);
9ee6e8bb 9032 }
b0109805 9033 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9034 }
5856d44e
YO
9035 if (loaded_base) {
9036 store_reg(s, rn, loaded_var);
9037 }
9ee6e8bb
PB
9038 if (insn & (1 << 21)) {
9039 /* Base register writeback. */
9040 if (insn & (1 << 24)) {
b0109805 9041 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9042 }
9043 /* Fault if writeback register is in register list. */
9044 if (insn & (1 << rn))
9045 goto illegal_op;
b0109805
PB
9046 store_reg(s, rn, addr);
9047 } else {
7d1b0095 9048 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9049 }
9050 }
9051 }
9052 break;
2af9ab77
JB
9053 case 5:
9054
9ee6e8bb 9055 op = (insn >> 21) & 0xf;
2af9ab77
JB
9056 if (op == 6) {
9057 /* Halfword pack. */
9058 tmp = load_reg(s, rn);
9059 tmp2 = load_reg(s, rm);
9060 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9061 if (insn & (1 << 5)) {
9062 /* pkhtb */
9063 if (shift == 0)
9064 shift = 31;
9065 tcg_gen_sari_i32(tmp2, tmp2, shift);
9066 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9067 tcg_gen_ext16u_i32(tmp2, tmp2);
9068 } else {
9069 /* pkhbt */
9070 if (shift)
9071 tcg_gen_shli_i32(tmp2, tmp2, shift);
9072 tcg_gen_ext16u_i32(tmp, tmp);
9073 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9074 }
9075 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9076 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9077 store_reg(s, rd, tmp);
9078 } else {
2af9ab77
JB
9079 /* Data processing register constant shift. */
9080 if (rn == 15) {
7d1b0095 9081 tmp = tcg_temp_new_i32();
2af9ab77
JB
9082 tcg_gen_movi_i32(tmp, 0);
9083 } else {
9084 tmp = load_reg(s, rn);
9085 }
9086 tmp2 = load_reg(s, rm);
9087
9088 shiftop = (insn >> 4) & 3;
9089 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9090 conds = (insn & (1 << 20)) != 0;
9091 logic_cc = (conds && thumb2_logic_op(op));
9092 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9093 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9094 goto illegal_op;
7d1b0095 9095 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9096 if (rd != 15) {
9097 store_reg(s, rd, tmp);
9098 } else {
7d1b0095 9099 tcg_temp_free_i32(tmp);
2af9ab77 9100 }
3174f8e9 9101 }
9ee6e8bb
PB
9102 break;
9103 case 13: /* Misc data processing. */
9104 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9105 if (op < 4 && (insn & 0xf000) != 0xf000)
9106 goto illegal_op;
9107 switch (op) {
9108 case 0: /* Register controlled shift. */
8984bd2e
PB
9109 tmp = load_reg(s, rn);
9110 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9111 if ((insn & 0x70) != 0)
9112 goto illegal_op;
9113 op = (insn >> 21) & 3;
8984bd2e
PB
9114 logic_cc = (insn & (1 << 20)) != 0;
9115 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9116 if (logic_cc)
9117 gen_logic_CC(tmp);
21aeb343 9118 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9119 break;
9120 case 1: /* Sign/zero extend. */
5e3f878a 9121 tmp = load_reg(s, rm);
9ee6e8bb 9122 shift = (insn >> 4) & 3;
1301f322 9123 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9124 rotate, a shift is sufficient. */
9125 if (shift != 0)
f669df27 9126 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9127 op = (insn >> 20) & 7;
9128 switch (op) {
5e3f878a
PB
9129 case 0: gen_sxth(tmp); break;
9130 case 1: gen_uxth(tmp); break;
9131 case 2: gen_sxtb16(tmp); break;
9132 case 3: gen_uxtb16(tmp); break;
9133 case 4: gen_sxtb(tmp); break;
9134 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9135 default: goto illegal_op;
9136 }
9137 if (rn != 15) {
5e3f878a 9138 tmp2 = load_reg(s, rn);
9ee6e8bb 9139 if ((op >> 1) == 1) {
5e3f878a 9140 gen_add16(tmp, tmp2);
9ee6e8bb 9141 } else {
5e3f878a 9142 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9143 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9144 }
9145 }
5e3f878a 9146 store_reg(s, rd, tmp);
9ee6e8bb
PB
9147 break;
9148 case 2: /* SIMD add/subtract. */
9149 op = (insn >> 20) & 7;
9150 shift = (insn >> 4) & 7;
9151 if ((op & 3) == 3 || (shift & 3) == 3)
9152 goto illegal_op;
6ddbc6e4
PB
9153 tmp = load_reg(s, rn);
9154 tmp2 = load_reg(s, rm);
9155 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9156 tcg_temp_free_i32(tmp2);
6ddbc6e4 9157 store_reg(s, rd, tmp);
9ee6e8bb
PB
9158 break;
9159 case 3: /* Other data processing. */
9160 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9161 if (op < 4) {
9162 /* Saturating add/subtract. */
d9ba4830
PB
9163 tmp = load_reg(s, rn);
9164 tmp2 = load_reg(s, rm);
9ee6e8bb 9165 if (op & 1)
9ef39277 9166 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9167 if (op & 2)
9ef39277 9168 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9169 else
9ef39277 9170 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9171 tcg_temp_free_i32(tmp2);
9ee6e8bb 9172 } else {
d9ba4830 9173 tmp = load_reg(s, rn);
9ee6e8bb
PB
9174 switch (op) {
9175 case 0x0a: /* rbit */
d9ba4830 9176 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9177 break;
9178 case 0x08: /* rev */
66896cb8 9179 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9180 break;
9181 case 0x09: /* rev16 */
d9ba4830 9182 gen_rev16(tmp);
9ee6e8bb
PB
9183 break;
9184 case 0x0b: /* revsh */
d9ba4830 9185 gen_revsh(tmp);
9ee6e8bb
PB
9186 break;
9187 case 0x10: /* sel */
d9ba4830 9188 tmp2 = load_reg(s, rm);
7d1b0095 9189 tmp3 = tcg_temp_new_i32();
0ecb72a5 9190 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9191 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9192 tcg_temp_free_i32(tmp3);
9193 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9194 break;
9195 case 0x18: /* clz */
d9ba4830 9196 gen_helper_clz(tmp, tmp);
9ee6e8bb 9197 break;
eb0ecd5a
WN
9198 case 0x20:
9199 case 0x21:
9200 case 0x22:
9201 case 0x28:
9202 case 0x29:
9203 case 0x2a:
9204 {
9205 /* crc32/crc32c */
9206 uint32_t sz = op & 0x3;
9207 uint32_t c = op & 0x8;
9208
9209 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9210 goto illegal_op;
9211 }
9212
9213 tmp2 = load_reg(s, rm);
9214 tmp3 = tcg_const_i32(1 << sz);
9215 if (c) {
9216 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9217 } else {
9218 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9219 }
9220 tcg_temp_free_i32(tmp2);
9221 tcg_temp_free_i32(tmp3);
9222 break;
9223 }
9ee6e8bb
PB
9224 default:
9225 goto illegal_op;
9226 }
9227 }
d9ba4830 9228 store_reg(s, rd, tmp);
9ee6e8bb
PB
9229 break;
9230 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9231 op = (insn >> 4) & 0xf;
d9ba4830
PB
9232 tmp = load_reg(s, rn);
9233 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9234 switch ((insn >> 20) & 7) {
9235 case 0: /* 32 x 32 -> 32 */
d9ba4830 9236 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9237 tcg_temp_free_i32(tmp2);
9ee6e8bb 9238 if (rs != 15) {
d9ba4830 9239 tmp2 = load_reg(s, rs);
9ee6e8bb 9240 if (op)
d9ba4830 9241 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9242 else
d9ba4830 9243 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9244 tcg_temp_free_i32(tmp2);
9ee6e8bb 9245 }
9ee6e8bb
PB
9246 break;
9247 case 1: /* 16 x 16 -> 32 */
d9ba4830 9248 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9249 tcg_temp_free_i32(tmp2);
9ee6e8bb 9250 if (rs != 15) {
d9ba4830 9251 tmp2 = load_reg(s, rs);
9ef39277 9252 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9253 tcg_temp_free_i32(tmp2);
9ee6e8bb 9254 }
9ee6e8bb
PB
9255 break;
9256 case 2: /* Dual multiply add. */
9257 case 4: /* Dual multiply subtract. */
9258 if (op)
d9ba4830
PB
9259 gen_swap_half(tmp2);
9260 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9261 if (insn & (1 << 22)) {
e1d177b9 9262 /* This subtraction cannot overflow. */
d9ba4830 9263 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9264 } else {
e1d177b9
PM
9265 /* This addition cannot overflow 32 bits;
9266 * however it may overflow considered as a signed
9267 * operation, in which case we must set the Q flag.
9268 */
9ef39277 9269 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9270 }
7d1b0095 9271 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9272 if (rs != 15)
9273 {
d9ba4830 9274 tmp2 = load_reg(s, rs);
9ef39277 9275 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9276 tcg_temp_free_i32(tmp2);
9ee6e8bb 9277 }
9ee6e8bb
PB
9278 break;
9279 case 3: /* 32 * 16 -> 32msb */
9280 if (op)
d9ba4830 9281 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9282 else
d9ba4830 9283 gen_sxth(tmp2);
a7812ae4
PB
9284 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9285 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9286 tmp = tcg_temp_new_i32();
a7812ae4 9287 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9288 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9289 if (rs != 15)
9290 {
d9ba4830 9291 tmp2 = load_reg(s, rs);
9ef39277 9292 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9293 tcg_temp_free_i32(tmp2);
9ee6e8bb 9294 }
9ee6e8bb 9295 break;
838fa72d
AJ
9296 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9297 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9298 if (rs != 15) {
838fa72d
AJ
9299 tmp = load_reg(s, rs);
9300 if (insn & (1 << 20)) {
9301 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9302 } else {
838fa72d 9303 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9304 }
2c0262af 9305 }
838fa72d
AJ
9306 if (insn & (1 << 4)) {
9307 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9308 }
9309 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9310 tmp = tcg_temp_new_i32();
838fa72d
AJ
9311 tcg_gen_trunc_i64_i32(tmp, tmp64);
9312 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9313 break;
9314 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9315 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9316 tcg_temp_free_i32(tmp2);
9ee6e8bb 9317 if (rs != 15) {
d9ba4830
PB
9318 tmp2 = load_reg(s, rs);
9319 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9320 tcg_temp_free_i32(tmp2);
5fd46862 9321 }
9ee6e8bb 9322 break;
2c0262af 9323 }
d9ba4830 9324 store_reg(s, rd, tmp);
2c0262af 9325 break;
9ee6e8bb
PB
9326 case 6: case 7: /* 64-bit multiply, Divide. */
9327 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9328 tmp = load_reg(s, rn);
9329 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9330 if ((op & 0x50) == 0x10) {
9331 /* sdiv, udiv */
47789990 9332 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9333 goto illegal_op;
47789990 9334 }
9ee6e8bb 9335 if (op & 0x20)
5e3f878a 9336 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9337 else
5e3f878a 9338 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9339 tcg_temp_free_i32(tmp2);
5e3f878a 9340 store_reg(s, rd, tmp);
9ee6e8bb
PB
9341 } else if ((op & 0xe) == 0xc) {
9342 /* Dual multiply accumulate long. */
9343 if (op & 1)
5e3f878a
PB
9344 gen_swap_half(tmp2);
9345 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9346 if (op & 0x10) {
5e3f878a 9347 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9348 } else {
5e3f878a 9349 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9350 }
7d1b0095 9351 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9352 /* BUGFIX */
9353 tmp64 = tcg_temp_new_i64();
9354 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9355 tcg_temp_free_i32(tmp);
a7812ae4
PB
9356 gen_addq(s, tmp64, rs, rd);
9357 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9358 tcg_temp_free_i64(tmp64);
2c0262af 9359 } else {
9ee6e8bb
PB
9360 if (op & 0x20) {
9361 /* Unsigned 64-bit multiply */
a7812ae4 9362 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9363 } else {
9ee6e8bb
PB
9364 if (op & 8) {
9365 /* smlalxy */
5e3f878a 9366 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9367 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9368 tmp64 = tcg_temp_new_i64();
9369 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9370 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9371 } else {
9372 /* Signed 64-bit multiply */
a7812ae4 9373 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9374 }
b5ff1b31 9375 }
9ee6e8bb
PB
9376 if (op & 4) {
9377 /* umaal */
a7812ae4
PB
9378 gen_addq_lo(s, tmp64, rs);
9379 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9380 } else if (op & 0x40) {
9381 /* 64-bit accumulate. */
a7812ae4 9382 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9383 }
a7812ae4 9384 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9385 tcg_temp_free_i64(tmp64);
5fd46862 9386 }
2c0262af 9387 break;
9ee6e8bb
PB
9388 }
9389 break;
9390 case 6: case 7: case 14: case 15:
9391 /* Coprocessor. */
9392 if (((insn >> 24) & 3) == 3) {
9393 /* Translate into the equivalent ARM encoding. */
f06053e3 9394 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9395 if (disas_neon_data_insn(env, s, insn))
9396 goto illegal_op;
6a57f3eb
WN
9397 } else if (((insn >> 8) & 0xe) == 10) {
9398 if (disas_vfp_insn(env, s, insn)) {
9399 goto illegal_op;
9400 }
9ee6e8bb
PB
9401 } else {
9402 if (insn & (1 << 28))
9403 goto illegal_op;
9404 if (disas_coproc_insn (env, s, insn))
9405 goto illegal_op;
9406 }
9407 break;
9408 case 8: case 9: case 10: case 11:
9409 if (insn & (1 << 15)) {
9410 /* Branches, misc control. */
9411 if (insn & 0x5000) {
9412 /* Unconditional branch. */
9413 /* signextend(hw1[10:0]) -> offset[:12]. */
9414 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9415 /* hw1[10:0] -> offset[11:1]. */
9416 offset |= (insn & 0x7ff) << 1;
9417 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9418 offset[24:22] already have the same value because of the
9419 sign extension above. */
9420 offset ^= ((~insn) & (1 << 13)) << 10;
9421 offset ^= ((~insn) & (1 << 11)) << 11;
9422
9ee6e8bb
PB
9423 if (insn & (1 << 14)) {
9424 /* Branch and link. */
3174f8e9 9425 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9426 }
3b46e624 9427
b0109805 9428 offset += s->pc;
9ee6e8bb
PB
9429 if (insn & (1 << 12)) {
9430 /* b/bl */
b0109805 9431 gen_jmp(s, offset);
9ee6e8bb
PB
9432 } else {
9433 /* blx */
b0109805 9434 offset &= ~(uint32_t)2;
be5e7a76 9435 /* thumb2 bx, no need to check */
b0109805 9436 gen_bx_im(s, offset);
2c0262af 9437 }
9ee6e8bb
PB
9438 } else if (((insn >> 23) & 7) == 7) {
9439 /* Misc control */
9440 if (insn & (1 << 13))
9441 goto illegal_op;
9442
9443 if (insn & (1 << 26)) {
9444 /* Secure monitor call (v6Z) */
e0c270d9
SW
9445 qemu_log_mask(LOG_UNIMP,
9446 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9447 goto illegal_op; /* not implemented. */
2c0262af 9448 } else {
9ee6e8bb
PB
9449 op = (insn >> 20) & 7;
9450 switch (op) {
9451 case 0: /* msr cpsr. */
9452 if (IS_M(env)) {
8984bd2e
PB
9453 tmp = load_reg(s, rn);
9454 addr = tcg_const_i32(insn & 0xff);
9455 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9456 tcg_temp_free_i32(addr);
7d1b0095 9457 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9458 gen_lookup_tb(s);
9459 break;
9460 }
9461 /* fall through */
9462 case 1: /* msr spsr. */
9463 if (IS_M(env))
9464 goto illegal_op;
2fbac54b
FN
9465 tmp = load_reg(s, rn);
9466 if (gen_set_psr(s,
9ee6e8bb 9467 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9468 op == 1, tmp))
9ee6e8bb
PB
9469 goto illegal_op;
9470 break;
9471 case 2: /* cps, nop-hint. */
9472 if (((insn >> 8) & 7) == 0) {
9473 gen_nop_hint(s, insn & 0xff);
9474 }
9475 /* Implemented as NOP in user mode. */
9476 if (IS_USER(s))
9477 break;
9478 offset = 0;
9479 imm = 0;
9480 if (insn & (1 << 10)) {
9481 if (insn & (1 << 7))
9482 offset |= CPSR_A;
9483 if (insn & (1 << 6))
9484 offset |= CPSR_I;
9485 if (insn & (1 << 5))
9486 offset |= CPSR_F;
9487 if (insn & (1 << 9))
9488 imm = CPSR_A | CPSR_I | CPSR_F;
9489 }
9490 if (insn & (1 << 8)) {
9491 offset |= 0x1f;
9492 imm |= (insn & 0x1f);
9493 }
9494 if (offset) {
2fbac54b 9495 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9496 }
9497 break;
9498 case 3: /* Special control operations. */
426f5abc 9499 ARCH(7);
9ee6e8bb
PB
9500 op = (insn >> 4) & 0xf;
9501 switch (op) {
9502 case 2: /* clrex */
426f5abc 9503 gen_clrex(s);
9ee6e8bb
PB
9504 break;
9505 case 4: /* dsb */
9506 case 5: /* dmb */
9507 case 6: /* isb */
9508 /* These execute as NOPs. */
9ee6e8bb
PB
9509 break;
9510 default:
9511 goto illegal_op;
9512 }
9513 break;
9514 case 4: /* bxj */
9515 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9516 tmp = load_reg(s, rn);
9517 gen_bx(s, tmp);
9ee6e8bb
PB
9518 break;
9519 case 5: /* Exception return. */
b8b45b68
RV
9520 if (IS_USER(s)) {
9521 goto illegal_op;
9522 }
9523 if (rn != 14 || rd != 15) {
9524 goto illegal_op;
9525 }
9526 tmp = load_reg(s, rn);
9527 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9528 gen_exception_return(s, tmp);
9529 break;
9ee6e8bb 9530 case 6: /* mrs cpsr. */
7d1b0095 9531 tmp = tcg_temp_new_i32();
9ee6e8bb 9532 if (IS_M(env)) {
8984bd2e
PB
9533 addr = tcg_const_i32(insn & 0xff);
9534 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9535 tcg_temp_free_i32(addr);
9ee6e8bb 9536 } else {
9ef39277 9537 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9538 }
8984bd2e 9539 store_reg(s, rd, tmp);
9ee6e8bb
PB
9540 break;
9541 case 7: /* mrs spsr. */
9542 /* Not accessible in user mode. */
9543 if (IS_USER(s) || IS_M(env))
9544 goto illegal_op;
d9ba4830
PB
9545 tmp = load_cpu_field(spsr);
9546 store_reg(s, rd, tmp);
9ee6e8bb 9547 break;
2c0262af
FB
9548 }
9549 }
9ee6e8bb
PB
9550 } else {
9551 /* Conditional branch. */
9552 op = (insn >> 22) & 0xf;
9553 /* Generate a conditional jump to next instruction. */
9554 s->condlabel = gen_new_label();
39fb730a 9555 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9556 s->condjmp = 1;
9557
9558 /* offset[11:1] = insn[10:0] */
9559 offset = (insn & 0x7ff) << 1;
9560 /* offset[17:12] = insn[21:16]. */
9561 offset |= (insn & 0x003f0000) >> 4;
9562 /* offset[31:20] = insn[26]. */
9563 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9564 /* offset[18] = insn[13]. */
9565 offset |= (insn & (1 << 13)) << 5;
9566 /* offset[19] = insn[11]. */
9567 offset |= (insn & (1 << 11)) << 8;
9568
9569 /* jump to the offset */
b0109805 9570 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9571 }
9572 } else {
9573 /* Data processing immediate. */
9574 if (insn & (1 << 25)) {
9575 if (insn & (1 << 24)) {
9576 if (insn & (1 << 20))
9577 goto illegal_op;
9578 /* Bitfield/Saturate. */
9579 op = (insn >> 21) & 7;
9580 imm = insn & 0x1f;
9581 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9582 if (rn == 15) {
7d1b0095 9583 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9584 tcg_gen_movi_i32(tmp, 0);
9585 } else {
9586 tmp = load_reg(s, rn);
9587 }
9ee6e8bb
PB
9588 switch (op) {
9589 case 2: /* Signed bitfield extract. */
9590 imm++;
9591 if (shift + imm > 32)
9592 goto illegal_op;
9593 if (imm < 32)
6ddbc6e4 9594 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9595 break;
9596 case 6: /* Unsigned bitfield extract. */
9597 imm++;
9598 if (shift + imm > 32)
9599 goto illegal_op;
9600 if (imm < 32)
6ddbc6e4 9601 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9602 break;
9603 case 3: /* Bitfield insert/clear. */
9604 if (imm < shift)
9605 goto illegal_op;
9606 imm = imm + 1 - shift;
9607 if (imm != 32) {
6ddbc6e4 9608 tmp2 = load_reg(s, rd);
d593c48e 9609 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9610 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9611 }
9612 break;
9613 case 7:
9614 goto illegal_op;
9615 default: /* Saturate. */
9ee6e8bb
PB
9616 if (shift) {
9617 if (op & 1)
6ddbc6e4 9618 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9619 else
6ddbc6e4 9620 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9621 }
6ddbc6e4 9622 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9623 if (op & 4) {
9624 /* Unsigned. */
9ee6e8bb 9625 if ((op & 1) && shift == 0)
9ef39277 9626 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9627 else
9ef39277 9628 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9629 } else {
9ee6e8bb 9630 /* Signed. */
9ee6e8bb 9631 if ((op & 1) && shift == 0)
9ef39277 9632 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9633 else
9ef39277 9634 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9635 }
b75263d6 9636 tcg_temp_free_i32(tmp2);
9ee6e8bb 9637 break;
2c0262af 9638 }
6ddbc6e4 9639 store_reg(s, rd, tmp);
9ee6e8bb
PB
9640 } else {
9641 imm = ((insn & 0x04000000) >> 15)
9642 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9643 if (insn & (1 << 22)) {
9644 /* 16-bit immediate. */
9645 imm |= (insn >> 4) & 0xf000;
9646 if (insn & (1 << 23)) {
9647 /* movt */
5e3f878a 9648 tmp = load_reg(s, rd);
86831435 9649 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9650 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9651 } else {
9ee6e8bb 9652 /* movw */
7d1b0095 9653 tmp = tcg_temp_new_i32();
5e3f878a 9654 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9655 }
9656 } else {
9ee6e8bb
PB
9657 /* Add/sub 12-bit immediate. */
9658 if (rn == 15) {
b0109805 9659 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9660 if (insn & (1 << 23))
b0109805 9661 offset -= imm;
9ee6e8bb 9662 else
b0109805 9663 offset += imm;
7d1b0095 9664 tmp = tcg_temp_new_i32();
5e3f878a 9665 tcg_gen_movi_i32(tmp, offset);
2c0262af 9666 } else {
5e3f878a 9667 tmp = load_reg(s, rn);
9ee6e8bb 9668 if (insn & (1 << 23))
5e3f878a 9669 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9670 else
5e3f878a 9671 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9672 }
9ee6e8bb 9673 }
5e3f878a 9674 store_reg(s, rd, tmp);
191abaa2 9675 }
9ee6e8bb
PB
9676 } else {
9677 int shifter_out = 0;
9678 /* modified 12-bit immediate. */
9679 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9680 imm = (insn & 0xff);
9681 switch (shift) {
9682 case 0: /* XY */
9683 /* Nothing to do. */
9684 break;
9685 case 1: /* 00XY00XY */
9686 imm |= imm << 16;
9687 break;
9688 case 2: /* XY00XY00 */
9689 imm |= imm << 16;
9690 imm <<= 8;
9691 break;
9692 case 3: /* XYXYXYXY */
9693 imm |= imm << 16;
9694 imm |= imm << 8;
9695 break;
9696 default: /* Rotated constant. */
9697 shift = (shift << 1) | (imm >> 7);
9698 imm |= 0x80;
9699 imm = imm << (32 - shift);
9700 shifter_out = 1;
9701 break;
b5ff1b31 9702 }
7d1b0095 9703 tmp2 = tcg_temp_new_i32();
3174f8e9 9704 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9705 rn = (insn >> 16) & 0xf;
3174f8e9 9706 if (rn == 15) {
7d1b0095 9707 tmp = tcg_temp_new_i32();
3174f8e9
FN
9708 tcg_gen_movi_i32(tmp, 0);
9709 } else {
9710 tmp = load_reg(s, rn);
9711 }
9ee6e8bb
PB
9712 op = (insn >> 21) & 0xf;
9713 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9714 shifter_out, tmp, tmp2))
9ee6e8bb 9715 goto illegal_op;
7d1b0095 9716 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9717 rd = (insn >> 8) & 0xf;
9718 if (rd != 15) {
3174f8e9
FN
9719 store_reg(s, rd, tmp);
9720 } else {
7d1b0095 9721 tcg_temp_free_i32(tmp);
2c0262af 9722 }
2c0262af 9723 }
9ee6e8bb
PB
9724 }
9725 break;
9726 case 12: /* Load/store single data item. */
9727 {
9728 int postinc = 0;
9729 int writeback = 0;
b0109805 9730 int user;
9ee6e8bb
PB
9731 if ((insn & 0x01100000) == 0x01000000) {
9732 if (disas_neon_ls_insn(env, s, insn))
c1713132 9733 goto illegal_op;
9ee6e8bb
PB
9734 break;
9735 }
a2fdc890
PM
9736 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9737 if (rs == 15) {
9738 if (!(insn & (1 << 20))) {
9739 goto illegal_op;
9740 }
9741 if (op != 2) {
9742 /* Byte or halfword load space with dest == r15 : memory hints.
9743 * Catch them early so we don't emit pointless addressing code.
9744 * This space is a mix of:
9745 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9746 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9747 * cores)
9748 * unallocated hints, which must be treated as NOPs
9749 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9750 * which is easiest for the decoding logic
9751 * Some space which must UNDEF
9752 */
9753 int op1 = (insn >> 23) & 3;
9754 int op2 = (insn >> 6) & 0x3f;
9755 if (op & 2) {
9756 goto illegal_op;
9757 }
9758 if (rn == 15) {
02afbf64
PM
9759 /* UNPREDICTABLE, unallocated hint or
9760 * PLD/PLDW/PLI (literal)
9761 */
a2fdc890
PM
9762 return 0;
9763 }
9764 if (op1 & 1) {
02afbf64 9765 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9766 }
9767 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9768 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9769 }
9770 /* UNDEF space, or an UNPREDICTABLE */
9771 return 1;
9772 }
9773 }
b0109805 9774 user = IS_USER(s);
9ee6e8bb 9775 if (rn == 15) {
7d1b0095 9776 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9777 /* PC relative. */
9778 /* s->pc has already been incremented by 4. */
9779 imm = s->pc & 0xfffffffc;
9780 if (insn & (1 << 23))
9781 imm += insn & 0xfff;
9782 else
9783 imm -= insn & 0xfff;
b0109805 9784 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9785 } else {
b0109805 9786 addr = load_reg(s, rn);
9ee6e8bb
PB
9787 if (insn & (1 << 23)) {
9788 /* Positive offset. */
9789 imm = insn & 0xfff;
b0109805 9790 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9791 } else {
9ee6e8bb 9792 imm = insn & 0xff;
2a0308c5
PM
9793 switch ((insn >> 8) & 0xf) {
9794 case 0x0: /* Shifted Register. */
9ee6e8bb 9795 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9796 if (shift > 3) {
9797 tcg_temp_free_i32(addr);
18c9b560 9798 goto illegal_op;
2a0308c5 9799 }
b26eefb6 9800 tmp = load_reg(s, rm);
9ee6e8bb 9801 if (shift)
b26eefb6 9802 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9803 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9804 tcg_temp_free_i32(tmp);
9ee6e8bb 9805 break;
2a0308c5 9806 case 0xc: /* Negative offset. */
b0109805 9807 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9808 break;
2a0308c5 9809 case 0xe: /* User privilege. */
b0109805
PB
9810 tcg_gen_addi_i32(addr, addr, imm);
9811 user = 1;
9ee6e8bb 9812 break;
2a0308c5 9813 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9814 imm = -imm;
9815 /* Fall through. */
2a0308c5 9816 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9817 postinc = 1;
9818 writeback = 1;
9819 break;
2a0308c5 9820 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9821 imm = -imm;
9822 /* Fall through. */
2a0308c5 9823 case 0xf: /* Pre-increment. */
b0109805 9824 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9825 writeback = 1;
9826 break;
9827 default:
2a0308c5 9828 tcg_temp_free_i32(addr);
b7bcbe95 9829 goto illegal_op;
9ee6e8bb
PB
9830 }
9831 }
9832 }
9ee6e8bb
PB
9833 if (insn & (1 << 20)) {
9834 /* Load. */
5a839c0d 9835 tmp = tcg_temp_new_i32();
a2fdc890 9836 switch (op) {
5a839c0d 9837 case 0:
08307563 9838 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9839 break;
9840 case 4:
08307563 9841 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9842 break;
9843 case 1:
08307563 9844 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9845 break;
9846 case 5:
08307563 9847 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9848 break;
9849 case 2:
08307563 9850 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9851 break;
2a0308c5 9852 default:
5a839c0d 9853 tcg_temp_free_i32(tmp);
2a0308c5
PM
9854 tcg_temp_free_i32(addr);
9855 goto illegal_op;
a2fdc890
PM
9856 }
9857 if (rs == 15) {
9858 gen_bx(s, tmp);
9ee6e8bb 9859 } else {
a2fdc890 9860 store_reg(s, rs, tmp);
9ee6e8bb
PB
9861 }
9862 } else {
9863 /* Store. */
b0109805 9864 tmp = load_reg(s, rs);
9ee6e8bb 9865 switch (op) {
5a839c0d 9866 case 0:
08307563 9867 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9868 break;
9869 case 1:
08307563 9870 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9871 break;
9872 case 2:
08307563 9873 gen_aa32_st32(tmp, addr, user);
5a839c0d 9874 break;
2a0308c5 9875 default:
5a839c0d 9876 tcg_temp_free_i32(tmp);
2a0308c5
PM
9877 tcg_temp_free_i32(addr);
9878 goto illegal_op;
b7bcbe95 9879 }
5a839c0d 9880 tcg_temp_free_i32(tmp);
2c0262af 9881 }
9ee6e8bb 9882 if (postinc)
b0109805
PB
9883 tcg_gen_addi_i32(addr, addr, imm);
9884 if (writeback) {
9885 store_reg(s, rn, addr);
9886 } else {
7d1b0095 9887 tcg_temp_free_i32(addr);
b0109805 9888 }
9ee6e8bb
PB
9889 }
9890 break;
9891 default:
9892 goto illegal_op;
2c0262af 9893 }
9ee6e8bb
PB
9894 return 0;
9895illegal_op:
9896 return 1;
2c0262af
FB
9897}
9898
0ecb72a5 9899static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9900{
9901 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9902 int32_t offset;
9903 int i;
39d5492a
PM
9904 TCGv_i32 tmp;
9905 TCGv_i32 tmp2;
9906 TCGv_i32 addr;
99c475ab 9907
9ee6e8bb
PB
9908 if (s->condexec_mask) {
9909 cond = s->condexec_cond;
bedd2912
JB
9910 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9911 s->condlabel = gen_new_label();
39fb730a 9912 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
9913 s->condjmp = 1;
9914 }
9ee6e8bb
PB
9915 }
9916
d31dd73e 9917 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9918 s->pc += 2;
b5ff1b31 9919
99c475ab
FB
9920 switch (insn >> 12) {
9921 case 0: case 1:
396e467c 9922
99c475ab
FB
9923 rd = insn & 7;
9924 op = (insn >> 11) & 3;
9925 if (op == 3) {
9926 /* add/subtract */
9927 rn = (insn >> 3) & 7;
396e467c 9928 tmp = load_reg(s, rn);
99c475ab
FB
9929 if (insn & (1 << 10)) {
9930 /* immediate */
7d1b0095 9931 tmp2 = tcg_temp_new_i32();
396e467c 9932 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9933 } else {
9934 /* reg */
9935 rm = (insn >> 6) & 7;
396e467c 9936 tmp2 = load_reg(s, rm);
99c475ab 9937 }
9ee6e8bb
PB
9938 if (insn & (1 << 9)) {
9939 if (s->condexec_mask)
396e467c 9940 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9941 else
72485ec4 9942 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9943 } else {
9944 if (s->condexec_mask)
396e467c 9945 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9946 else
72485ec4 9947 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9948 }
7d1b0095 9949 tcg_temp_free_i32(tmp2);
396e467c 9950 store_reg(s, rd, tmp);
99c475ab
FB
9951 } else {
9952 /* shift immediate */
9953 rm = (insn >> 3) & 7;
9954 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9955 tmp = load_reg(s, rm);
9956 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9957 if (!s->condexec_mask)
9958 gen_logic_CC(tmp);
9959 store_reg(s, rd, tmp);
99c475ab
FB
9960 }
9961 break;
9962 case 2: case 3:
9963 /* arithmetic large immediate */
9964 op = (insn >> 11) & 3;
9965 rd = (insn >> 8) & 0x7;
396e467c 9966 if (op == 0) { /* mov */
7d1b0095 9967 tmp = tcg_temp_new_i32();
396e467c 9968 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9969 if (!s->condexec_mask)
396e467c
FN
9970 gen_logic_CC(tmp);
9971 store_reg(s, rd, tmp);
9972 } else {
9973 tmp = load_reg(s, rd);
7d1b0095 9974 tmp2 = tcg_temp_new_i32();
396e467c
FN
9975 tcg_gen_movi_i32(tmp2, insn & 0xff);
9976 switch (op) {
9977 case 1: /* cmp */
72485ec4 9978 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9979 tcg_temp_free_i32(tmp);
9980 tcg_temp_free_i32(tmp2);
396e467c
FN
9981 break;
9982 case 2: /* add */
9983 if (s->condexec_mask)
9984 tcg_gen_add_i32(tmp, tmp, tmp2);
9985 else
72485ec4 9986 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9987 tcg_temp_free_i32(tmp2);
396e467c
FN
9988 store_reg(s, rd, tmp);
9989 break;
9990 case 3: /* sub */
9991 if (s->condexec_mask)
9992 tcg_gen_sub_i32(tmp, tmp, tmp2);
9993 else
72485ec4 9994 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9995 tcg_temp_free_i32(tmp2);
396e467c
FN
9996 store_reg(s, rd, tmp);
9997 break;
9998 }
99c475ab 9999 }
99c475ab
FB
10000 break;
10001 case 4:
10002 if (insn & (1 << 11)) {
10003 rd = (insn >> 8) & 7;
5899f386
FB
10004 /* load pc-relative. Bit 1 of PC is ignored. */
10005 val = s->pc + 2 + ((insn & 0xff) * 4);
10006 val &= ~(uint32_t)2;
7d1b0095 10007 addr = tcg_temp_new_i32();
b0109805 10008 tcg_gen_movi_i32(addr, val);
c40c8556 10009 tmp = tcg_temp_new_i32();
08307563 10010 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 10011 tcg_temp_free_i32(addr);
b0109805 10012 store_reg(s, rd, tmp);
99c475ab
FB
10013 break;
10014 }
10015 if (insn & (1 << 10)) {
10016 /* data processing extended or blx */
10017 rd = (insn & 7) | ((insn >> 4) & 8);
10018 rm = (insn >> 3) & 0xf;
10019 op = (insn >> 8) & 3;
10020 switch (op) {
10021 case 0: /* add */
396e467c
FN
10022 tmp = load_reg(s, rd);
10023 tmp2 = load_reg(s, rm);
10024 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10025 tcg_temp_free_i32(tmp2);
396e467c 10026 store_reg(s, rd, tmp);
99c475ab
FB
10027 break;
10028 case 1: /* cmp */
396e467c
FN
10029 tmp = load_reg(s, rd);
10030 tmp2 = load_reg(s, rm);
72485ec4 10031 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10032 tcg_temp_free_i32(tmp2);
10033 tcg_temp_free_i32(tmp);
99c475ab
FB
10034 break;
10035 case 2: /* mov/cpy */
396e467c
FN
10036 tmp = load_reg(s, rm);
10037 store_reg(s, rd, tmp);
99c475ab
FB
10038 break;
10039 case 3:/* branch [and link] exchange thumb register */
b0109805 10040 tmp = load_reg(s, rm);
99c475ab 10041 if (insn & (1 << 7)) {
be5e7a76 10042 ARCH(5);
99c475ab 10043 val = (uint32_t)s->pc | 1;
7d1b0095 10044 tmp2 = tcg_temp_new_i32();
b0109805
PB
10045 tcg_gen_movi_i32(tmp2, val);
10046 store_reg(s, 14, tmp2);
99c475ab 10047 }
be5e7a76 10048 /* already thumb, no need to check */
d9ba4830 10049 gen_bx(s, tmp);
99c475ab
FB
10050 break;
10051 }
10052 break;
10053 }
10054
10055 /* data processing register */
10056 rd = insn & 7;
10057 rm = (insn >> 3) & 7;
10058 op = (insn >> 6) & 0xf;
10059 if (op == 2 || op == 3 || op == 4 || op == 7) {
10060 /* the shift/rotate ops want the operands backwards */
10061 val = rm;
10062 rm = rd;
10063 rd = val;
10064 val = 1;
10065 } else {
10066 val = 0;
10067 }
10068
396e467c 10069 if (op == 9) { /* neg */
7d1b0095 10070 tmp = tcg_temp_new_i32();
396e467c
FN
10071 tcg_gen_movi_i32(tmp, 0);
10072 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10073 tmp = load_reg(s, rd);
10074 } else {
39d5492a 10075 TCGV_UNUSED_I32(tmp);
396e467c 10076 }
99c475ab 10077
396e467c 10078 tmp2 = load_reg(s, rm);
5899f386 10079 switch (op) {
99c475ab 10080 case 0x0: /* and */
396e467c 10081 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10082 if (!s->condexec_mask)
396e467c 10083 gen_logic_CC(tmp);
99c475ab
FB
10084 break;
10085 case 0x1: /* eor */
396e467c 10086 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10087 if (!s->condexec_mask)
396e467c 10088 gen_logic_CC(tmp);
99c475ab
FB
10089 break;
10090 case 0x2: /* lsl */
9ee6e8bb 10091 if (s->condexec_mask) {
365af80e 10092 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10093 } else {
9ef39277 10094 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10095 gen_logic_CC(tmp2);
9ee6e8bb 10096 }
99c475ab
FB
10097 break;
10098 case 0x3: /* lsr */
9ee6e8bb 10099 if (s->condexec_mask) {
365af80e 10100 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10101 } else {
9ef39277 10102 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10103 gen_logic_CC(tmp2);
9ee6e8bb 10104 }
99c475ab
FB
10105 break;
10106 case 0x4: /* asr */
9ee6e8bb 10107 if (s->condexec_mask) {
365af80e 10108 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10109 } else {
9ef39277 10110 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10111 gen_logic_CC(tmp2);
9ee6e8bb 10112 }
99c475ab
FB
10113 break;
10114 case 0x5: /* adc */
49b4c31e 10115 if (s->condexec_mask) {
396e467c 10116 gen_adc(tmp, tmp2);
49b4c31e
RH
10117 } else {
10118 gen_adc_CC(tmp, tmp, tmp2);
10119 }
99c475ab
FB
10120 break;
10121 case 0x6: /* sbc */
2de68a49 10122 if (s->condexec_mask) {
396e467c 10123 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10124 } else {
10125 gen_sbc_CC(tmp, tmp, tmp2);
10126 }
99c475ab
FB
10127 break;
10128 case 0x7: /* ror */
9ee6e8bb 10129 if (s->condexec_mask) {
f669df27
AJ
10130 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10131 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10132 } else {
9ef39277 10133 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10134 gen_logic_CC(tmp2);
9ee6e8bb 10135 }
99c475ab
FB
10136 break;
10137 case 0x8: /* tst */
396e467c
FN
10138 tcg_gen_and_i32(tmp, tmp, tmp2);
10139 gen_logic_CC(tmp);
99c475ab 10140 rd = 16;
5899f386 10141 break;
99c475ab 10142 case 0x9: /* neg */
9ee6e8bb 10143 if (s->condexec_mask)
396e467c 10144 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10145 else
72485ec4 10146 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10147 break;
10148 case 0xa: /* cmp */
72485ec4 10149 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10150 rd = 16;
10151 break;
10152 case 0xb: /* cmn */
72485ec4 10153 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10154 rd = 16;
10155 break;
10156 case 0xc: /* orr */
396e467c 10157 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10158 if (!s->condexec_mask)
396e467c 10159 gen_logic_CC(tmp);
99c475ab
FB
10160 break;
10161 case 0xd: /* mul */
7b2919a0 10162 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10163 if (!s->condexec_mask)
396e467c 10164 gen_logic_CC(tmp);
99c475ab
FB
10165 break;
10166 case 0xe: /* bic */
f669df27 10167 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10168 if (!s->condexec_mask)
396e467c 10169 gen_logic_CC(tmp);
99c475ab
FB
10170 break;
10171 case 0xf: /* mvn */
396e467c 10172 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10173 if (!s->condexec_mask)
396e467c 10174 gen_logic_CC(tmp2);
99c475ab 10175 val = 1;
5899f386 10176 rm = rd;
99c475ab
FB
10177 break;
10178 }
10179 if (rd != 16) {
396e467c
FN
10180 if (val) {
10181 store_reg(s, rm, tmp2);
10182 if (op != 0xf)
7d1b0095 10183 tcg_temp_free_i32(tmp);
396e467c
FN
10184 } else {
10185 store_reg(s, rd, tmp);
7d1b0095 10186 tcg_temp_free_i32(tmp2);
396e467c
FN
10187 }
10188 } else {
7d1b0095
PM
10189 tcg_temp_free_i32(tmp);
10190 tcg_temp_free_i32(tmp2);
99c475ab
FB
10191 }
10192 break;
10193
10194 case 5:
10195 /* load/store register offset. */
10196 rd = insn & 7;
10197 rn = (insn >> 3) & 7;
10198 rm = (insn >> 6) & 7;
10199 op = (insn >> 9) & 7;
b0109805 10200 addr = load_reg(s, rn);
b26eefb6 10201 tmp = load_reg(s, rm);
b0109805 10202 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10203 tcg_temp_free_i32(tmp);
99c475ab 10204
c40c8556 10205 if (op < 3) { /* store */
b0109805 10206 tmp = load_reg(s, rd);
c40c8556
PM
10207 } else {
10208 tmp = tcg_temp_new_i32();
10209 }
99c475ab
FB
10210
10211 switch (op) {
10212 case 0: /* str */
08307563 10213 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
10214 break;
10215 case 1: /* strh */
08307563 10216 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
10217 break;
10218 case 2: /* strb */
08307563 10219 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
10220 break;
10221 case 3: /* ldrsb */
08307563 10222 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
10223 break;
10224 case 4: /* ldr */
08307563 10225 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10226 break;
10227 case 5: /* ldrh */
08307563 10228 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
10229 break;
10230 case 6: /* ldrb */
08307563 10231 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
10232 break;
10233 case 7: /* ldrsh */
08307563 10234 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
10235 break;
10236 }
c40c8556 10237 if (op >= 3) { /* load */
b0109805 10238 store_reg(s, rd, tmp);
c40c8556
PM
10239 } else {
10240 tcg_temp_free_i32(tmp);
10241 }
7d1b0095 10242 tcg_temp_free_i32(addr);
99c475ab
FB
10243 break;
10244
10245 case 6:
10246 /* load/store word immediate offset */
10247 rd = insn & 7;
10248 rn = (insn >> 3) & 7;
b0109805 10249 addr = load_reg(s, rn);
99c475ab 10250 val = (insn >> 4) & 0x7c;
b0109805 10251 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10252
10253 if (insn & (1 << 11)) {
10254 /* load */
c40c8556 10255 tmp = tcg_temp_new_i32();
08307563 10256 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10257 store_reg(s, rd, tmp);
99c475ab
FB
10258 } else {
10259 /* store */
b0109805 10260 tmp = load_reg(s, rd);
08307563 10261 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10262 tcg_temp_free_i32(tmp);
99c475ab 10263 }
7d1b0095 10264 tcg_temp_free_i32(addr);
99c475ab
FB
10265 break;
10266
10267 case 7:
10268 /* load/store byte immediate offset */
10269 rd = insn & 7;
10270 rn = (insn >> 3) & 7;
b0109805 10271 addr = load_reg(s, rn);
99c475ab 10272 val = (insn >> 6) & 0x1f;
b0109805 10273 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10274
10275 if (insn & (1 << 11)) {
10276 /* load */
c40c8556 10277 tmp = tcg_temp_new_i32();
08307563 10278 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 10279 store_reg(s, rd, tmp);
99c475ab
FB
10280 } else {
10281 /* store */
b0109805 10282 tmp = load_reg(s, rd);
08307563 10283 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 10284 tcg_temp_free_i32(tmp);
99c475ab 10285 }
7d1b0095 10286 tcg_temp_free_i32(addr);
99c475ab
FB
10287 break;
10288
10289 case 8:
10290 /* load/store halfword immediate offset */
10291 rd = insn & 7;
10292 rn = (insn >> 3) & 7;
b0109805 10293 addr = load_reg(s, rn);
99c475ab 10294 val = (insn >> 5) & 0x3e;
b0109805 10295 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10296
10297 if (insn & (1 << 11)) {
10298 /* load */
c40c8556 10299 tmp = tcg_temp_new_i32();
08307563 10300 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 10301 store_reg(s, rd, tmp);
99c475ab
FB
10302 } else {
10303 /* store */
b0109805 10304 tmp = load_reg(s, rd);
08307563 10305 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 10306 tcg_temp_free_i32(tmp);
99c475ab 10307 }
7d1b0095 10308 tcg_temp_free_i32(addr);
99c475ab
FB
10309 break;
10310
10311 case 9:
10312 /* load/store from stack */
10313 rd = (insn >> 8) & 7;
b0109805 10314 addr = load_reg(s, 13);
99c475ab 10315 val = (insn & 0xff) * 4;
b0109805 10316 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10317
10318 if (insn & (1 << 11)) {
10319 /* load */
c40c8556 10320 tmp = tcg_temp_new_i32();
08307563 10321 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10322 store_reg(s, rd, tmp);
99c475ab
FB
10323 } else {
10324 /* store */
b0109805 10325 tmp = load_reg(s, rd);
08307563 10326 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10327 tcg_temp_free_i32(tmp);
99c475ab 10328 }
7d1b0095 10329 tcg_temp_free_i32(addr);
99c475ab
FB
10330 break;
10331
10332 case 10:
10333 /* add to high reg */
10334 rd = (insn >> 8) & 7;
5899f386
FB
10335 if (insn & (1 << 11)) {
10336 /* SP */
5e3f878a 10337 tmp = load_reg(s, 13);
5899f386
FB
10338 } else {
10339 /* PC. bit 1 is ignored. */
7d1b0095 10340 tmp = tcg_temp_new_i32();
5e3f878a 10341 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10342 }
99c475ab 10343 val = (insn & 0xff) * 4;
5e3f878a
PB
10344 tcg_gen_addi_i32(tmp, tmp, val);
10345 store_reg(s, rd, tmp);
99c475ab
FB
10346 break;
10347
10348 case 11:
10349 /* misc */
10350 op = (insn >> 8) & 0xf;
10351 switch (op) {
10352 case 0:
10353 /* adjust stack pointer */
b26eefb6 10354 tmp = load_reg(s, 13);
99c475ab
FB
10355 val = (insn & 0x7f) * 4;
10356 if (insn & (1 << 7))
6a0d8a1d 10357 val = -(int32_t)val;
b26eefb6
PB
10358 tcg_gen_addi_i32(tmp, tmp, val);
10359 store_reg(s, 13, tmp);
99c475ab
FB
10360 break;
10361
9ee6e8bb
PB
10362 case 2: /* sign/zero extend. */
10363 ARCH(6);
10364 rd = insn & 7;
10365 rm = (insn >> 3) & 7;
b0109805 10366 tmp = load_reg(s, rm);
9ee6e8bb 10367 switch ((insn >> 6) & 3) {
b0109805
PB
10368 case 0: gen_sxth(tmp); break;
10369 case 1: gen_sxtb(tmp); break;
10370 case 2: gen_uxth(tmp); break;
10371 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10372 }
b0109805 10373 store_reg(s, rd, tmp);
9ee6e8bb 10374 break;
99c475ab
FB
10375 case 4: case 5: case 0xc: case 0xd:
10376 /* push/pop */
b0109805 10377 addr = load_reg(s, 13);
5899f386
FB
10378 if (insn & (1 << 8))
10379 offset = 4;
99c475ab 10380 else
5899f386
FB
10381 offset = 0;
10382 for (i = 0; i < 8; i++) {
10383 if (insn & (1 << i))
10384 offset += 4;
10385 }
10386 if ((insn & (1 << 11)) == 0) {
b0109805 10387 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10388 }
99c475ab
FB
10389 for (i = 0; i < 8; i++) {
10390 if (insn & (1 << i)) {
10391 if (insn & (1 << 11)) {
10392 /* pop */
c40c8556 10393 tmp = tcg_temp_new_i32();
08307563 10394 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 10395 store_reg(s, i, tmp);
99c475ab
FB
10396 } else {
10397 /* push */
b0109805 10398 tmp = load_reg(s, i);
08307563 10399 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10400 tcg_temp_free_i32(tmp);
99c475ab 10401 }
5899f386 10402 /* advance to the next address. */
b0109805 10403 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10404 }
10405 }
39d5492a 10406 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10407 if (insn & (1 << 8)) {
10408 if (insn & (1 << 11)) {
10409 /* pop pc */
c40c8556 10410 tmp = tcg_temp_new_i32();
08307563 10411 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
10412 /* don't set the pc until the rest of the instruction
10413 has completed */
10414 } else {
10415 /* push lr */
b0109805 10416 tmp = load_reg(s, 14);
08307563 10417 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10418 tcg_temp_free_i32(tmp);
99c475ab 10419 }
b0109805 10420 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10421 }
5899f386 10422 if ((insn & (1 << 11)) == 0) {
b0109805 10423 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10424 }
99c475ab 10425 /* write back the new stack pointer */
b0109805 10426 store_reg(s, 13, addr);
99c475ab 10427 /* set the new PC value */
be5e7a76
DES
10428 if ((insn & 0x0900) == 0x0900) {
10429 store_reg_from_load(env, s, 15, tmp);
10430 }
99c475ab
FB
10431 break;
10432
9ee6e8bb
PB
10433 case 1: case 3: case 9: case 11: /* czb */
10434 rm = insn & 7;
d9ba4830 10435 tmp = load_reg(s, rm);
9ee6e8bb
PB
10436 s->condlabel = gen_new_label();
10437 s->condjmp = 1;
10438 if (insn & (1 << 11))
cb63669a 10439 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10440 else
cb63669a 10441 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10442 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10443 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10444 val = (uint32_t)s->pc + 2;
10445 val += offset;
10446 gen_jmp(s, val);
10447 break;
10448
10449 case 15: /* IT, nop-hint. */
10450 if ((insn & 0xf) == 0) {
10451 gen_nop_hint(s, (insn >> 4) & 0xf);
10452 break;
10453 }
10454 /* If Then. */
10455 s->condexec_cond = (insn >> 4) & 0xe;
10456 s->condexec_mask = insn & 0x1f;
10457 /* No actual code generated for this insn, just setup state. */
10458 break;
10459
06c949e6 10460 case 0xe: /* bkpt */
be5e7a76 10461 ARCH(5);
bc4a0de0 10462 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
10463 break;
10464
9ee6e8bb
PB
10465 case 0xa: /* rev */
10466 ARCH(6);
10467 rn = (insn >> 3) & 0x7;
10468 rd = insn & 0x7;
b0109805 10469 tmp = load_reg(s, rn);
9ee6e8bb 10470 switch ((insn >> 6) & 3) {
66896cb8 10471 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10472 case 1: gen_rev16(tmp); break;
10473 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10474 default: goto illegal_op;
10475 }
b0109805 10476 store_reg(s, rd, tmp);
9ee6e8bb
PB
10477 break;
10478
d9e028c1
PM
10479 case 6:
10480 switch ((insn >> 5) & 7) {
10481 case 2:
10482 /* setend */
10483 ARCH(6);
10962fd5
PM
10484 if (((insn >> 3) & 1) != s->bswap_code) {
10485 /* Dynamic endianness switching not implemented. */
e0c270d9 10486 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10487 goto illegal_op;
10488 }
9ee6e8bb 10489 break;
d9e028c1
PM
10490 case 3:
10491 /* cps */
10492 ARCH(6);
10493 if (IS_USER(s)) {
10494 break;
8984bd2e 10495 }
d9e028c1
PM
10496 if (IS_M(env)) {
10497 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10498 /* FAULTMASK */
10499 if (insn & 1) {
10500 addr = tcg_const_i32(19);
10501 gen_helper_v7m_msr(cpu_env, addr, tmp);
10502 tcg_temp_free_i32(addr);
10503 }
10504 /* PRIMASK */
10505 if (insn & 2) {
10506 addr = tcg_const_i32(16);
10507 gen_helper_v7m_msr(cpu_env, addr, tmp);
10508 tcg_temp_free_i32(addr);
10509 }
10510 tcg_temp_free_i32(tmp);
10511 gen_lookup_tb(s);
10512 } else {
10513 if (insn & (1 << 4)) {
10514 shift = CPSR_A | CPSR_I | CPSR_F;
10515 } else {
10516 shift = 0;
10517 }
10518 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10519 }
d9e028c1
PM
10520 break;
10521 default:
10522 goto undef;
9ee6e8bb
PB
10523 }
10524 break;
10525
99c475ab
FB
10526 default:
10527 goto undef;
10528 }
10529 break;
10530
10531 case 12:
a7d3970d 10532 {
99c475ab 10533 /* load/store multiple */
39d5492a
PM
10534 TCGv_i32 loaded_var;
10535 TCGV_UNUSED_I32(loaded_var);
99c475ab 10536 rn = (insn >> 8) & 0x7;
b0109805 10537 addr = load_reg(s, rn);
99c475ab
FB
10538 for (i = 0; i < 8; i++) {
10539 if (insn & (1 << i)) {
99c475ab
FB
10540 if (insn & (1 << 11)) {
10541 /* load */
c40c8556 10542 tmp = tcg_temp_new_i32();
08307563 10543 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
10544 if (i == rn) {
10545 loaded_var = tmp;
10546 } else {
10547 store_reg(s, i, tmp);
10548 }
99c475ab
FB
10549 } else {
10550 /* store */
b0109805 10551 tmp = load_reg(s, i);
08307563 10552 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 10553 tcg_temp_free_i32(tmp);
99c475ab 10554 }
5899f386 10555 /* advance to the next address */
b0109805 10556 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10557 }
10558 }
b0109805 10559 if ((insn & (1 << rn)) == 0) {
a7d3970d 10560 /* base reg not in list: base register writeback */
b0109805
PB
10561 store_reg(s, rn, addr);
10562 } else {
a7d3970d
PM
10563 /* base reg in list: if load, complete it now */
10564 if (insn & (1 << 11)) {
10565 store_reg(s, rn, loaded_var);
10566 }
7d1b0095 10567 tcg_temp_free_i32(addr);
b0109805 10568 }
99c475ab 10569 break;
a7d3970d 10570 }
99c475ab
FB
10571 case 13:
10572 /* conditional branch or swi */
10573 cond = (insn >> 8) & 0xf;
10574 if (cond == 0xe)
10575 goto undef;
10576
10577 if (cond == 0xf) {
10578 /* swi */
eaed129d 10579 gen_set_pc_im(s, s->pc);
9ee6e8bb 10580 s->is_jmp = DISAS_SWI;
99c475ab
FB
10581 break;
10582 }
10583 /* generate a conditional jump to next instruction */
e50e6a20 10584 s->condlabel = gen_new_label();
39fb730a 10585 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10586 s->condjmp = 1;
99c475ab
FB
10587
10588 /* jump to the offset */
5899f386 10589 val = (uint32_t)s->pc + 2;
99c475ab 10590 offset = ((int32_t)insn << 24) >> 24;
5899f386 10591 val += offset << 1;
8aaca4c0 10592 gen_jmp(s, val);
99c475ab
FB
10593 break;
10594
10595 case 14:
358bf29e 10596 if (insn & (1 << 11)) {
9ee6e8bb
PB
10597 if (disas_thumb2_insn(env, s, insn))
10598 goto undef32;
358bf29e
PB
10599 break;
10600 }
9ee6e8bb 10601 /* unconditional branch */
99c475ab
FB
10602 val = (uint32_t)s->pc;
10603 offset = ((int32_t)insn << 21) >> 21;
10604 val += (offset << 1) + 2;
8aaca4c0 10605 gen_jmp(s, val);
99c475ab
FB
10606 break;
10607
10608 case 15:
9ee6e8bb 10609 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10610 goto undef32;
9ee6e8bb 10611 break;
99c475ab
FB
10612 }
10613 return;
9ee6e8bb 10614undef32:
bc4a0de0 10615 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
10616 return;
10617illegal_op:
99c475ab 10618undef:
bc4a0de0 10619 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
10620}
10621
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(ARMCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_internal_a64(cpu, tb, search_pc);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Initialise the disassembly context from the TB flags; these were
     * captured when the TB was looked up, so they describe the CPU state
     * this block was translated for.  */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->current_pl = arm_current_pl(env);
    dc->features = env->features;

    /* Scratch TCG temporaries shared by the VFP/Neon decoders.  */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        /* If a breakpoint lands on this insn, raise EXCP_DEBUG instead of
         * translating it.  */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            /* Record per-op PC/condexec/icount side tables so that
             * restore_state_to_opc() can map a host PC back to guest state.  */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask left and
             * fold the next "then/else" bit into the low bit of the
             * condition.  When the mask empties the IT block is finished.  */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
        num_insns ++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            /* The "condition failed" path of the final conditional insn:
             * fall through to the next sequential instruction.  */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Pad the remainder of the side tables for this TB.  */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
10919
0ecb72a5 10920void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10921{
5639c3f2 10922 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10923}
10924
0ecb72a5 10925void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10926{
5639c3f2 10927 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10928}
10929
b5ff1b31
FB
/* Human-readable names for the low 4 bits of the CPSR mode field, used by
 * arm_cpu_dump_state().  Encodings this table doesn't recognise print as
 * "???".  The pointers themselves are also const so the whole table can
 * live in read-only data and elements can't be reassigned by accident.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10934
878096ee
AF
10935void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10936 int flags)
2c0262af 10937{
878096ee
AF
10938 ARMCPU *cpu = ARM_CPU(cs);
10939 CPUARMState *env = &cpu->env;
2c0262af 10940 int i;
b5ff1b31 10941 uint32_t psr;
2c0262af
FB
10942
10943 for(i=0;i<16;i++) {
7fe48483 10944 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10945 if ((i % 4) == 3)
7fe48483 10946 cpu_fprintf(f, "\n");
2c0262af 10947 else
7fe48483 10948 cpu_fprintf(f, " ");
2c0262af 10949 }
b5ff1b31 10950 psr = cpsr_read(env);
687fa640
TS
10951 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10952 psr,
b5ff1b31
FB
10953 psr & (1 << 31) ? 'N' : '-',
10954 psr & (1 << 30) ? 'Z' : '-',
10955 psr & (1 << 29) ? 'C' : '-',
10956 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10957 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10958 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10959
f2617cfc
PM
10960 if (flags & CPU_DUMP_FPU) {
10961 int numvfpregs = 0;
10962 if (arm_feature(env, ARM_FEATURE_VFP)) {
10963 numvfpregs += 16;
10964 }
10965 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10966 numvfpregs += 16;
10967 }
10968 for (i = 0; i < numvfpregs; i++) {
10969 uint64_t v = float64_val(env->vfp.regs[i]);
10970 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10971 i * 2, (uint32_t)v,
10972 i * 2 + 1, (uint32_t)(v >> 32),
10973 i, v);
10974 }
10975 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10976 }
2c0262af 10977}
a6b025d3 10978
0ecb72a5 10979void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10980{
3926cc84
AG
10981 if (is_a64(env)) {
10982 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10983 env->condexec_bits = 0;
3926cc84
AG
10984 } else {
10985 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 10986 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 10987 }
d2856f1a 10988}
This page took 2.811394 seconds and 4 git commands to generate.