]> Git Repo - qemu.git/blame - target-arm/translate.c
dump: eliminate DumpState.page_size ("guest's page size")
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
be5e7a76
DES
38#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
40/* currently all emulated v5 cores are also v5TE, so don't bother */
41#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
42#define ENABLE_ARCH_5J 0
43#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 47#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d
PM
52static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
53
b5ff1b31
FB
54#if defined(CONFIG_USER_ONLY)
55#define IS_USER(s) 1
56#else
57#define IS_USER(s) (s->user)
58#endif
59
3407ad0e 60TCGv_ptr cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
66c374de 64static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
65static TCGv_i64 cpu_exclusive_addr;
66static TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
03d05e2d 68static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_info;
70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
82/* initialize TCG globals. */
83void arm_translate_init(void)
84{
155c3eac
FN
85 int i;
86
a7812ae4
PB
87 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
88
155c3eac
FN
89 for (i = 0; i < 16; i++) {
90 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 91 offsetof(CPUARMState, regs[i]),
155c3eac
FN
92 regnames[i]);
93 }
66c374de
AJ
94 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
95 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
96 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
97 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
98
03d05e2d 99 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 100 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 101 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 102 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 103#ifdef CONFIG_USER_ONLY
03d05e2d 104 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 106 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 107 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 108#endif
155c3eac 109
14ade10f 110 a64_translate_init();
b26eefb6
PB
111}
112
39d5492a 113static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 114{
39d5492a 115 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
116 tcg_gen_ld_i32(tmp, cpu_env, offset);
117 return tmp;
118}
119
0ecb72a5 120#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 121
39d5492a 122static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
123{
124 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 125 tcg_temp_free_i32(var);
d9ba4830
PB
126}
127
128#define store_cpu_field(var, name) \
0ecb72a5 129 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 130
b26eefb6 131/* Set a variable to the value of a CPU register. */
39d5492a 132static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
133{
134 if (reg == 15) {
135 uint32_t addr;
b90372ad 136 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
137 if (s->thumb)
138 addr = (long)s->pc + 2;
139 else
140 addr = (long)s->pc + 4;
141 tcg_gen_movi_i32(var, addr);
142 } else {
155c3eac 143 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
144 }
145}
146
147/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 148static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 149{
39d5492a 150 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
151 load_reg_var(s, tmp, reg);
152 return tmp;
153}
154
155/* Set a CPU register. The source must be a temporary and will be
156 marked as dead. */
39d5492a 157static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
158{
159 if (reg == 15) {
160 tcg_gen_andi_i32(var, var, ~1);
161 s->is_jmp = DISAS_JUMP;
162 }
155c3eac 163 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 164 tcg_temp_free_i32(var);
b26eefb6
PB
165}
166
b26eefb6 167/* Value extensions. */
86831435
PB
168#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
169#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
170#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
171#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
172
1497c961
PB
173#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
174#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 175
b26eefb6 176
39d5492a 177static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 178{
39d5492a 179 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 180 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
181 tcg_temp_free_i32(tmp_mask);
182}
d9ba4830
PB
183/* Set NZCV flags from the high 4 bits of var. */
184#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
185
d4a2dc67 186static void gen_exception_internal(int excp)
d9ba4830 187{
d4a2dc67
PM
188 TCGv_i32 tcg_excp = tcg_const_i32(excp);
189
190 assert(excp_is_internal(excp));
191 gen_helper_exception_internal(cpu_env, tcg_excp);
192 tcg_temp_free_i32(tcg_excp);
193}
194
195static void gen_exception(int excp, uint32_t syndrome)
196{
197 TCGv_i32 tcg_excp = tcg_const_i32(excp);
198 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
199
200 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
201 tcg_temp_free_i32(tcg_syn);
202 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
203}
204
39d5492a 205static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 206{
39d5492a
PM
207 TCGv_i32 tmp1 = tcg_temp_new_i32();
208 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
209 tcg_gen_ext16s_i32(tmp1, a);
210 tcg_gen_ext16s_i32(tmp2, b);
3670669c 211 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 212 tcg_temp_free_i32(tmp2);
3670669c
PB
213 tcg_gen_sari_i32(a, a, 16);
214 tcg_gen_sari_i32(b, b, 16);
215 tcg_gen_mul_i32(b, b, a);
216 tcg_gen_mov_i32(a, tmp1);
7d1b0095 217 tcg_temp_free_i32(tmp1);
3670669c
PB
218}
219
220/* Byteswap each halfword. */
39d5492a 221static void gen_rev16(TCGv_i32 var)
3670669c 222{
39d5492a 223 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
224 tcg_gen_shri_i32(tmp, var, 8);
225 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
226 tcg_gen_shli_i32(var, var, 8);
227 tcg_gen_andi_i32(var, var, 0xff00ff00);
228 tcg_gen_or_i32(var, var, tmp);
7d1b0095 229 tcg_temp_free_i32(tmp);
3670669c
PB
230}
231
232/* Byteswap low halfword and sign extend. */
39d5492a 233static void gen_revsh(TCGv_i32 var)
3670669c 234{
1a855029
AJ
235 tcg_gen_ext16u_i32(var, var);
236 tcg_gen_bswap16_i32(var, var);
237 tcg_gen_ext16s_i32(var, var);
3670669c
PB
238}
239
240/* Unsigned bitfield extract. */
39d5492a 241static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
242{
243 if (shift)
244 tcg_gen_shri_i32(var, var, shift);
245 tcg_gen_andi_i32(var, var, mask);
246}
247
248/* Signed bitfield extract. */
39d5492a 249static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
250{
251 uint32_t signbit;
252
253 if (shift)
254 tcg_gen_sari_i32(var, var, shift);
255 if (shift + width < 32) {
256 signbit = 1u << (width - 1);
257 tcg_gen_andi_i32(var, var, (1u << width) - 1);
258 tcg_gen_xori_i32(var, var, signbit);
259 tcg_gen_subi_i32(var, var, signbit);
260 }
261}
262
838fa72d 263/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 264static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 265{
838fa72d
AJ
266 TCGv_i64 tmp64 = tcg_temp_new_i64();
267
268 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 269 tcg_temp_free_i32(b);
838fa72d
AJ
270 tcg_gen_shli_i64(tmp64, tmp64, 32);
271 tcg_gen_add_i64(a, tmp64, a);
272
273 tcg_temp_free_i64(tmp64);
274 return a;
275}
276
277/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 278static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
279{
280 TCGv_i64 tmp64 = tcg_temp_new_i64();
281
282 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 283 tcg_temp_free_i32(b);
838fa72d
AJ
284 tcg_gen_shli_i64(tmp64, tmp64, 32);
285 tcg_gen_sub_i64(a, tmp64, a);
286
287 tcg_temp_free_i64(tmp64);
288 return a;
3670669c
PB
289}
290
5e3f878a 291/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 292static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 293{
39d5492a
PM
294 TCGv_i32 lo = tcg_temp_new_i32();
295 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 296 TCGv_i64 ret;
5e3f878a 297
831d7fe8 298 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 299 tcg_temp_free_i32(a);
7d1b0095 300 tcg_temp_free_i32(b);
831d7fe8
RH
301
302 ret = tcg_temp_new_i64();
303 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
304 tcg_temp_free_i32(lo);
305 tcg_temp_free_i32(hi);
831d7fe8
RH
306
307 return ret;
5e3f878a
PB
308}
309
39d5492a 310static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 311{
39d5492a
PM
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 314 TCGv_i64 ret;
5e3f878a 315
831d7fe8 316 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 317 tcg_temp_free_i32(a);
7d1b0095 318 tcg_temp_free_i32(b);
831d7fe8
RH
319
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
831d7fe8
RH
324
325 return ret;
5e3f878a
PB
326}
327
8f01245e 328/* Swap low and high halfwords. */
39d5492a 329static void gen_swap_half(TCGv_i32 var)
8f01245e 330{
39d5492a 331 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
332 tcg_gen_shri_i32(tmp, var, 16);
333 tcg_gen_shli_i32(var, var, 16);
334 tcg_gen_or_i32(var, var, tmp);
7d1b0095 335 tcg_temp_free_i32(tmp);
8f01245e
PB
336}
337
b26eefb6
PB
338/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
339 tmp = (t0 ^ t1) & 0x8000;
340 t0 &= ~0x8000;
341 t1 &= ~0x8000;
342 t0 = (t0 + t1) ^ tmp;
343 */
344
39d5492a 345static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 346{
39d5492a 347 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
348 tcg_gen_xor_i32(tmp, t0, t1);
349 tcg_gen_andi_i32(tmp, tmp, 0x8000);
350 tcg_gen_andi_i32(t0, t0, ~0x8000);
351 tcg_gen_andi_i32(t1, t1, ~0x8000);
352 tcg_gen_add_i32(t0, t0, t1);
353 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
354 tcg_temp_free_i32(tmp);
355 tcg_temp_free_i32(t1);
b26eefb6
PB
356}
357
358/* Set CF to the top bit of var. */
39d5492a 359static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 360{
66c374de 361 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
362}
363
364/* Set N and Z flags from var. */
39d5492a 365static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 366{
66c374de
AJ
367 tcg_gen_mov_i32(cpu_NF, var);
368 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
369}
370
371/* T0 += T1 + CF. */
39d5492a 372static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 373{
396e467c 374 tcg_gen_add_i32(t0, t0, t1);
66c374de 375 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
376}
377
e9bb4aa9 378/* dest = T0 + T1 + CF. */
39d5492a 379static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 380{
e9bb4aa9 381 tcg_gen_add_i32(dest, t0, t1);
66c374de 382 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
383}
384
3670669c 385/* dest = T0 - T1 + CF - 1. */
39d5492a 386static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 387{
3670669c 388 tcg_gen_sub_i32(dest, t0, t1);
66c374de 389 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 390 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
391}
392
72485ec4 393/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 394static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 395{
39d5492a 396 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
397 tcg_gen_movi_i32(tmp, 0);
398 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 399 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 400 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
403 tcg_temp_free_i32(tmp);
404 tcg_gen_mov_i32(dest, cpu_NF);
405}
406
49b4c31e 407/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 408static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 409{
39d5492a 410 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
411 if (TCG_TARGET_HAS_add2_i32) {
412 tcg_gen_movi_i32(tmp, 0);
413 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 414 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
415 } else {
416 TCGv_i64 q0 = tcg_temp_new_i64();
417 TCGv_i64 q1 = tcg_temp_new_i64();
418 tcg_gen_extu_i32_i64(q0, t0);
419 tcg_gen_extu_i32_i64(q1, t1);
420 tcg_gen_add_i64(q0, q0, q1);
421 tcg_gen_extu_i32_i64(q1, cpu_CF);
422 tcg_gen_add_i64(q0, q0, q1);
423 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
424 tcg_temp_free_i64(q0);
425 tcg_temp_free_i64(q1);
426 }
427 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
428 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
429 tcg_gen_xor_i32(tmp, t0, t1);
430 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
431 tcg_temp_free_i32(tmp);
432 tcg_gen_mov_i32(dest, cpu_NF);
433}
434
72485ec4 435/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 436static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 437{
39d5492a 438 TCGv_i32 tmp;
72485ec4
AJ
439 tcg_gen_sub_i32(cpu_NF, t0, t1);
440 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
441 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
442 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
443 tmp = tcg_temp_new_i32();
444 tcg_gen_xor_i32(tmp, t0, t1);
445 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
446 tcg_temp_free_i32(tmp);
447 tcg_gen_mov_i32(dest, cpu_NF);
448}
449
e77f0832 450/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 451static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 452{
39d5492a 453 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
454 tcg_gen_not_i32(tmp, t1);
455 gen_adc_CC(dest, t0, tmp);
39d5492a 456 tcg_temp_free_i32(tmp);
2de68a49
RH
457}
458
365af80e 459#define GEN_SHIFT(name) \
39d5492a 460static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 461{ \
39d5492a 462 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
463 tmp1 = tcg_temp_new_i32(); \
464 tcg_gen_andi_i32(tmp1, t1, 0xff); \
465 tmp2 = tcg_const_i32(0); \
466 tmp3 = tcg_const_i32(0x1f); \
467 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
468 tcg_temp_free_i32(tmp3); \
469 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
470 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
471 tcg_temp_free_i32(tmp2); \
472 tcg_temp_free_i32(tmp1); \
473}
474GEN_SHIFT(shl)
475GEN_SHIFT(shr)
476#undef GEN_SHIFT
477
39d5492a 478static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 479{
39d5492a 480 TCGv_i32 tmp1, tmp2;
365af80e
AJ
481 tmp1 = tcg_temp_new_i32();
482 tcg_gen_andi_i32(tmp1, t1, 0xff);
483 tmp2 = tcg_const_i32(0x1f);
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
485 tcg_temp_free_i32(tmp2);
486 tcg_gen_sar_i32(dest, t0, tmp1);
487 tcg_temp_free_i32(tmp1);
488}
489
39d5492a 490static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 491{
39d5492a
PM
492 TCGv_i32 c0 = tcg_const_i32(0);
493 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
494 tcg_gen_neg_i32(tmp, src);
495 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
496 tcg_temp_free_i32(c0);
497 tcg_temp_free_i32(tmp);
498}
ad69471c 499
39d5492a 500static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 501{
9a119ff6 502 if (shift == 0) {
66c374de 503 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 504 } else {
66c374de
AJ
505 tcg_gen_shri_i32(cpu_CF, var, shift);
506 if (shift != 31) {
507 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
508 }
9a119ff6 509 }
9a119ff6 510}
b26eefb6 511
9a119ff6 512/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
513static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
514 int shift, int flags)
9a119ff6
PB
515{
516 switch (shiftop) {
517 case 0: /* LSL */
518 if (shift != 0) {
519 if (flags)
520 shifter_out_im(var, 32 - shift);
521 tcg_gen_shli_i32(var, var, shift);
522 }
523 break;
524 case 1: /* LSR */
525 if (shift == 0) {
526 if (flags) {
66c374de 527 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
528 }
529 tcg_gen_movi_i32(var, 0);
530 } else {
531 if (flags)
532 shifter_out_im(var, shift - 1);
533 tcg_gen_shri_i32(var, var, shift);
534 }
535 break;
536 case 2: /* ASR */
537 if (shift == 0)
538 shift = 32;
539 if (flags)
540 shifter_out_im(var, shift - 1);
541 if (shift == 32)
542 shift = 31;
543 tcg_gen_sari_i32(var, var, shift);
544 break;
545 case 3: /* ROR/RRX */
546 if (shift != 0) {
547 if (flags)
548 shifter_out_im(var, shift - 1);
f669df27 549 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 550 } else {
39d5492a 551 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 552 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
553 if (flags)
554 shifter_out_im(var, 0);
555 tcg_gen_shri_i32(var, var, 1);
b26eefb6 556 tcg_gen_or_i32(var, var, tmp);
7d1b0095 557 tcg_temp_free_i32(tmp);
b26eefb6
PB
558 }
559 }
560};
561
39d5492a
PM
562static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
563 TCGv_i32 shift, int flags)
8984bd2e
PB
564{
565 if (flags) {
566 switch (shiftop) {
9ef39277
BS
567 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
568 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
569 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
570 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
571 }
572 } else {
573 switch (shiftop) {
365af80e
AJ
574 case 0:
575 gen_shl(var, var, shift);
576 break;
577 case 1:
578 gen_shr(var, var, shift);
579 break;
580 case 2:
581 gen_sar(var, var, shift);
582 break;
f669df27
AJ
583 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
584 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
585 }
586 }
7d1b0095 587 tcg_temp_free_i32(shift);
8984bd2e
PB
588}
589
6ddbc6e4
PB
590#define PAS_OP(pfx) \
591 switch (op2) { \
592 case 0: gen_pas_helper(glue(pfx,add16)); break; \
593 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
594 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
595 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
596 case 4: gen_pas_helper(glue(pfx,add8)); break; \
597 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
598 }
39d5492a 599static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 600{
a7812ae4 601 TCGv_ptr tmp;
6ddbc6e4
PB
602
603 switch (op1) {
604#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
605 case 1:
a7812ae4 606 tmp = tcg_temp_new_ptr();
0ecb72a5 607 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 608 PAS_OP(s)
b75263d6 609 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
610 break;
611 case 5:
a7812ae4 612 tmp = tcg_temp_new_ptr();
0ecb72a5 613 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 614 PAS_OP(u)
b75263d6 615 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
616 break;
617#undef gen_pas_helper
618#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
619 case 2:
620 PAS_OP(q);
621 break;
622 case 3:
623 PAS_OP(sh);
624 break;
625 case 6:
626 PAS_OP(uq);
627 break;
628 case 7:
629 PAS_OP(uh);
630 break;
631#undef gen_pas_helper
632 }
633}
9ee6e8bb
PB
634#undef PAS_OP
635
6ddbc6e4
PB
636/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
637#define PAS_OP(pfx) \
ed89a2f1 638 switch (op1) { \
6ddbc6e4
PB
639 case 0: gen_pas_helper(glue(pfx,add8)); break; \
640 case 1: gen_pas_helper(glue(pfx,add16)); break; \
641 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
642 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
643 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
644 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
645 }
39d5492a 646static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 647{
a7812ae4 648 TCGv_ptr tmp;
6ddbc6e4 649
ed89a2f1 650 switch (op2) {
6ddbc6e4
PB
651#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
652 case 0:
a7812ae4 653 tmp = tcg_temp_new_ptr();
0ecb72a5 654 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 655 PAS_OP(s)
b75263d6 656 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
657 break;
658 case 4:
a7812ae4 659 tmp = tcg_temp_new_ptr();
0ecb72a5 660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 661 PAS_OP(u)
b75263d6 662 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
663 break;
664#undef gen_pas_helper
665#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
666 case 1:
667 PAS_OP(q);
668 break;
669 case 2:
670 PAS_OP(sh);
671 break;
672 case 5:
673 PAS_OP(uq);
674 break;
675 case 6:
676 PAS_OP(uh);
677 break;
678#undef gen_pas_helper
679 }
680}
9ee6e8bb
PB
681#undef PAS_OP
682
39fb730a
AG
683/*
684 * generate a conditional branch based on ARM condition code cc.
685 * This is common between ARM and Aarch64 targets.
686 */
687void arm_gen_test_cc(int cc, int label)
d9ba4830 688{
39d5492a 689 TCGv_i32 tmp;
d9ba4830
PB
690 int inv;
691
d9ba4830
PB
692 switch (cc) {
693 case 0: /* eq: Z */
66c374de 694 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
695 break;
696 case 1: /* ne: !Z */
66c374de 697 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
698 break;
699 case 2: /* cs: C */
66c374de 700 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
701 break;
702 case 3: /* cc: !C */
66c374de 703 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
704 break;
705 case 4: /* mi: N */
66c374de 706 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
707 break;
708 case 5: /* pl: !N */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
710 break;
711 case 6: /* vs: V */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
713 break;
714 case 7: /* vc: !V */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
716 break;
717 case 8: /* hi: C && !Z */
718 inv = gen_new_label();
66c374de
AJ
719 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
720 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
721 gen_set_label(inv);
722 break;
723 case 9: /* ls: !C || Z */
66c374de
AJ
724 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
725 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
726 break;
727 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
728 tmp = tcg_temp_new_i32();
729 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 730 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 731 tcg_temp_free_i32(tmp);
d9ba4830
PB
732 break;
733 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 737 tcg_temp_free_i32(tmp);
d9ba4830
PB
738 break;
739 case 12: /* gt: !Z && N == V */
740 inv = gen_new_label();
66c374de
AJ
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 gen_set_label(inv);
747 break;
748 case 13: /* le: Z || N != V */
66c374de
AJ
749 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
750 tmp = tcg_temp_new_i32();
751 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 752 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 753 tcg_temp_free_i32(tmp);
d9ba4830
PB
754 break;
755 default:
756 fprintf(stderr, "Bad condition code 0x%x\n", cc);
757 abort();
758 }
d9ba4830 759}
2c0262af 760
b1d8e52e 761static const uint8_t table_logic_cc[16] = {
2c0262af
FB
762 1, /* and */
763 1, /* xor */
764 0, /* sub */
765 0, /* rsb */
766 0, /* add */
767 0, /* adc */
768 0, /* sbc */
769 0, /* rsc */
770 1, /* andl */
771 1, /* xorl */
772 0, /* cmp */
773 0, /* cmn */
774 1, /* orr */
775 1, /* mov */
776 1, /* bic */
777 1, /* mvn */
778};
3b46e624 779
d9ba4830
PB
780/* Set PC and Thumb state from an immediate address. */
781static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 782{
39d5492a 783 TCGv_i32 tmp;
99c475ab 784
b26eefb6 785 s->is_jmp = DISAS_UPDATE;
d9ba4830 786 if (s->thumb != (addr & 1)) {
7d1b0095 787 tmp = tcg_temp_new_i32();
d9ba4830 788 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 790 tcg_temp_free_i32(tmp);
d9ba4830 791 }
155c3eac 792 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
793}
794
795/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 796static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 797{
d9ba4830 798 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
799 tcg_gen_andi_i32(cpu_R[15], var, ~1);
800 tcg_gen_andi_i32(var, var, 1);
801 store_cpu_field(var, thumb);
d9ba4830
PB
802}
803
21aeb343
JR
804/* Variant of store_reg which uses branch&exchange logic when storing
805 to r15 in ARM architecture v7 and above. The source must be a temporary
806 and will be marked as dead. */
0ecb72a5 807static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 808 int reg, TCGv_i32 var)
21aeb343
JR
809{
810 if (reg == 15 && ENABLE_ARCH_7) {
811 gen_bx(s, var);
812 } else {
813 store_reg(s, reg, var);
814 }
815}
816
be5e7a76
DES
817/* Variant of store_reg which uses branch&exchange logic when storing
818 * to r15 in ARM architecture v5T and above. This is used for storing
819 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
820 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 821static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 822 int reg, TCGv_i32 var)
be5e7a76
DES
823{
824 if (reg == 15 && ENABLE_ARCH_5) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
828 }
829}
830
08307563
PM
831/* Abstractions of "generate code to do a guest load/store for
832 * AArch32", where a vaddr is always 32 bits (and is zero
833 * extended if we're a 64 bit core) and data is also
834 * 32 bits unless specifically doing a 64 bit access.
835 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 836 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
837 */
838#if TARGET_LONG_BITS == 32
839
09f78135
RH
840#define DO_GEN_LD(SUFF, OPC) \
841static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 842{ \
09f78135 843 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
844}
845
09f78135
RH
846#define DO_GEN_ST(SUFF, OPC) \
847static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 848{ \
09f78135 849 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
850}
851
852static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
853{
09f78135 854 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
855}
856
857static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
858{
09f78135 859 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
860}
861
862#else
863
09f78135
RH
864#define DO_GEN_LD(SUFF, OPC) \
865static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
866{ \
867 TCGv addr64 = tcg_temp_new(); \
08307563 868 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 869 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 870 tcg_temp_free(addr64); \
08307563
PM
871}
872
09f78135
RH
873#define DO_GEN_ST(SUFF, OPC) \
874static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
875{ \
876 TCGv addr64 = tcg_temp_new(); \
08307563 877 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 878 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 879 tcg_temp_free(addr64); \
08307563
PM
880}
881
882static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
883{
884 TCGv addr64 = tcg_temp_new();
885 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 886 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
887 tcg_temp_free(addr64);
888}
889
890static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
891{
892 TCGv addr64 = tcg_temp_new();
893 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 894 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
895 tcg_temp_free(addr64);
896}
897
898#endif
899
09f78135
RH
900DO_GEN_LD(8s, MO_SB)
901DO_GEN_LD(8u, MO_UB)
902DO_GEN_LD(16s, MO_TESW)
903DO_GEN_LD(16u, MO_TEUW)
904DO_GEN_LD(32u, MO_TEUL)
905DO_GEN_ST(8, MO_UB)
906DO_GEN_ST(16, MO_TEUW)
907DO_GEN_ST(32, MO_TEUL)
08307563 908
eaed129d 909static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 910{
40f860cd 911 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
912}
913
d4a2dc67
PM
914static inline void
915gen_set_condexec (DisasContext *s)
916{
917 if (s->condexec_mask) {
918 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
919 TCGv_i32 tmp = tcg_temp_new_i32();
920 tcg_gen_movi_i32(tmp, val);
921 store_cpu_field(tmp, condexec_bits);
922 }
923}
924
925static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
926{
927 gen_set_condexec(s);
928 gen_set_pc_im(s, s->pc - offset);
929 gen_exception_internal(excp);
930 s->is_jmp = DISAS_JUMP;
931}
932
933static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
934{
935 gen_set_condexec(s);
936 gen_set_pc_im(s, s->pc - offset);
937 gen_exception(excp, syn);
938 s->is_jmp = DISAS_JUMP;
939}
940
b5ff1b31
FB
941/* Force a TB lookup after an instruction that changes the CPU state. */
942static inline void gen_lookup_tb(DisasContext *s)
943{
a6445c52 944 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
945 s->is_jmp = DISAS_UPDATE;
946}
947
b0109805 948static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 949 TCGv_i32 var)
2c0262af 950{
1e8d4eec 951 int val, rm, shift, shiftop;
39d5492a 952 TCGv_i32 offset;
2c0262af
FB
953
954 if (!(insn & (1 << 25))) {
955 /* immediate */
956 val = insn & 0xfff;
957 if (!(insn & (1 << 23)))
958 val = -val;
537730b9 959 if (val != 0)
b0109805 960 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
961 } else {
962 /* shift/register */
963 rm = (insn) & 0xf;
964 shift = (insn >> 7) & 0x1f;
1e8d4eec 965 shiftop = (insn >> 5) & 3;
b26eefb6 966 offset = load_reg(s, rm);
9a119ff6 967 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 968 if (!(insn & (1 << 23)))
b0109805 969 tcg_gen_sub_i32(var, var, offset);
2c0262af 970 else
b0109805 971 tcg_gen_add_i32(var, var, offset);
7d1b0095 972 tcg_temp_free_i32(offset);
2c0262af
FB
973 }
974}
975
191f9a93 976static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 977 int extra, TCGv_i32 var)
2c0262af
FB
978{
979 int val, rm;
39d5492a 980 TCGv_i32 offset;
3b46e624 981
2c0262af
FB
982 if (insn & (1 << 22)) {
983 /* immediate */
984 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
985 if (!(insn & (1 << 23)))
986 val = -val;
18acad92 987 val += extra;
537730b9 988 if (val != 0)
b0109805 989 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
990 } else {
991 /* register */
191f9a93 992 if (extra)
b0109805 993 tcg_gen_addi_i32(var, var, extra);
2c0262af 994 rm = (insn) & 0xf;
b26eefb6 995 offset = load_reg(s, rm);
2c0262af 996 if (!(insn & (1 << 23)))
b0109805 997 tcg_gen_sub_i32(var, var, offset);
2c0262af 998 else
b0109805 999 tcg_gen_add_i32(var, var, offset);
7d1b0095 1000 tcg_temp_free_i32(offset);
2c0262af
FB
1001 }
1002}
1003
5aaebd13
PM
1004static TCGv_ptr get_fpstatus_ptr(int neon)
1005{
1006 TCGv_ptr statusptr = tcg_temp_new_ptr();
1007 int offset;
1008 if (neon) {
0ecb72a5 1009 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1010 } else {
0ecb72a5 1011 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1012 }
1013 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1014 return statusptr;
1015}
1016
4373f3ce
PB
1017#define VFP_OP2(name) \
1018static inline void gen_vfp_##name(int dp) \
1019{ \
ae1857ec
PM
1020 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1021 if (dp) { \
1022 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1023 } else { \
1024 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1025 } \
1026 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1027}
1028
4373f3ce
PB
1029VFP_OP2(add)
1030VFP_OP2(sub)
1031VFP_OP2(mul)
1032VFP_OP2(div)
1033
1034#undef VFP_OP2
1035
605a6aed
PM
1036static inline void gen_vfp_F1_mul(int dp)
1037{
1038 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1039 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1040 if (dp) {
ae1857ec 1041 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1042 } else {
ae1857ec 1043 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1044 }
ae1857ec 1045 tcg_temp_free_ptr(fpst);
605a6aed
PM
1046}
1047
1048static inline void gen_vfp_F1_neg(int dp)
1049{
1050 /* Like gen_vfp_neg() but put result in F1 */
1051 if (dp) {
1052 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1053 } else {
1054 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1055 }
1056}
1057
4373f3ce
PB
1058static inline void gen_vfp_abs(int dp)
1059{
1060 if (dp)
1061 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1062 else
1063 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1064}
1065
1066static inline void gen_vfp_neg(int dp)
1067{
1068 if (dp)
1069 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1070 else
1071 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1072}
1073
1074static inline void gen_vfp_sqrt(int dp)
1075{
1076 if (dp)
1077 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1078 else
1079 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1080}
1081
1082static inline void gen_vfp_cmp(int dp)
1083{
1084 if (dp)
1085 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1086 else
1087 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1088}
1089
1090static inline void gen_vfp_cmpe(int dp)
1091{
1092 if (dp)
1093 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1094 else
1095 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1096}
1097
1098static inline void gen_vfp_F1_ld0(int dp)
1099{
1100 if (dp)
5b340b51 1101 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1102 else
5b340b51 1103 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1104}
1105
5500b06c
PM
1106#define VFP_GEN_ITOF(name) \
1107static inline void gen_vfp_##name(int dp, int neon) \
1108{ \
5aaebd13 1109 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1110 if (dp) { \
1111 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1112 } else { \
1113 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1114 } \
b7fa9214 1115 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1116}
1117
5500b06c
PM
1118VFP_GEN_ITOF(uito)
1119VFP_GEN_ITOF(sito)
1120#undef VFP_GEN_ITOF
4373f3ce 1121
5500b06c
PM
1122#define VFP_GEN_FTOI(name) \
1123static inline void gen_vfp_##name(int dp, int neon) \
1124{ \
5aaebd13 1125 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1126 if (dp) { \
1127 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1128 } else { \
1129 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1130 } \
b7fa9214 1131 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1132}
1133
5500b06c
PM
1134VFP_GEN_FTOI(toui)
1135VFP_GEN_FTOI(touiz)
1136VFP_GEN_FTOI(tosi)
1137VFP_GEN_FTOI(tosiz)
1138#undef VFP_GEN_FTOI
4373f3ce 1139
16d5b3ca 1140#define VFP_GEN_FIX(name, round) \
5500b06c 1141static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1142{ \
39d5492a 1143 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1144 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1145 if (dp) { \
16d5b3ca
WN
1146 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1147 statusptr); \
5500b06c 1148 } else { \
16d5b3ca
WN
1149 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1150 statusptr); \
5500b06c 1151 } \
b75263d6 1152 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1153 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1154}
16d5b3ca
WN
1155VFP_GEN_FIX(tosh, _round_to_zero)
1156VFP_GEN_FIX(tosl, _round_to_zero)
1157VFP_GEN_FIX(touh, _round_to_zero)
1158VFP_GEN_FIX(toul, _round_to_zero)
1159VFP_GEN_FIX(shto, )
1160VFP_GEN_FIX(slto, )
1161VFP_GEN_FIX(uhto, )
1162VFP_GEN_FIX(ulto, )
4373f3ce 1163#undef VFP_GEN_FIX
9ee6e8bb 1164
39d5492a 1165static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1166{
08307563 1167 if (dp) {
6ce2faf4 1168 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
08307563 1169 } else {
6ce2faf4 1170 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
08307563 1171 }
b5ff1b31
FB
1172}
1173
39d5492a 1174static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1175{
08307563 1176 if (dp) {
6ce2faf4 1177 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
08307563 1178 } else {
6ce2faf4 1179 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
08307563 1180 }
b5ff1b31
FB
1181}
1182
8e96005d
FB
1183static inline long
1184vfp_reg_offset (int dp, int reg)
1185{
1186 if (dp)
1187 return offsetof(CPUARMState, vfp.regs[reg]);
1188 else if (reg & 1) {
1189 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1190 + offsetof(CPU_DoubleU, l.upper);
1191 } else {
1192 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1193 + offsetof(CPU_DoubleU, l.lower);
1194 }
1195}
9ee6e8bb
PB
1196
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Piece n of D register 'reg' is S register reg*2 + n. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1206
39d5492a 1207static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1208{
39d5492a 1209 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1210 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1211 return tmp;
1212}
1213
39d5492a 1214static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1215{
1216 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1217 tcg_temp_free_i32(var);
8f8e3aa4
PB
1218}
1219
a7812ae4 1220static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1221{
1222 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1223}
1224
a7812ae4 1225static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1226{
1227 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1228}
1229
4373f3ce
PB
/* The TCG "float" load/store ops are just the integer ops of the same
 * width; these aliases let the FP code below read more naturally.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1234
b7bcbe95
FB
1235static inline void gen_mov_F0_vreg(int dp, int reg)
1236{
1237 if (dp)
4373f3ce 1238 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1239 else
4373f3ce 1240 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1241}
1242
1243static inline void gen_mov_F1_vreg(int dp, int reg)
1244{
1245 if (dp)
4373f3ce 1246 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1247 else
4373f3ce 1248 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1249}
1250
1251static inline void gen_mov_vreg_F0(int dp, int reg)
1252{
1253 if (dp)
4373f3ce 1254 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1255 else
4373f3ce 1256 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1257}
1258
18c9b560
AZ
1259#define ARM_CP_RW_BIT (1 << 20)
1260
a7812ae4 1261static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1262{
0ecb72a5 1263 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1264}
1265
a7812ae4 1266static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1267{
0ecb72a5 1268 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1269}
1270
39d5492a 1271static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1272{
39d5492a 1273 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1274 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1275 return var;
e677137d
PB
1276}
1277
39d5492a 1278static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1279{
0ecb72a5 1280 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1281 tcg_temp_free_i32(var);
e677137d
PB
1282}
1283
1284static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1285{
1286 iwmmxt_store_reg(cpu_M0, rn);
1287}
1288
1289static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1290{
1291 iwmmxt_load_reg(cpu_M0, rn);
1292}
1293
1294static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1295{
1296 iwmmxt_load_reg(cpu_V1, rn);
1297 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1298}
1299
1300static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1301{
1302 iwmmxt_load_reg(cpu_V1, rn);
1303 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1304}
1305
1306static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1307{
1308 iwmmxt_load_reg(cpu_V1, rn);
1309 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1310}
1311
/* Generators for iwMMXt helper wrappers of the form
 * M0 = helper(M0, wRn).  The _ENV variants additionally pass cpu_env
 * (helpers that need CPU state, e.g. for saturation flags); _ENV_SIZE
 * instantiates the byte/word/long variants; _ENV1 is the one-operand
 * form M0 = helper(M0).
 */
#define IWMMXT_OP(name)                                                 \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                \
{                                                                       \
    iwmmxt_load_reg(cpu_V1, rn);                                        \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                   \
}

#define IWMMXT_OP_ENV(name)                                             \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                \
{                                                                       \
    iwmmxt_load_reg(cpu_V1, rn);                                        \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);          \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name)                                            \
static inline void gen_op_iwmmxt_##name##_M0(void)                      \
{                                                                       \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);                  \
}
1336
1337IWMMXT_OP(maddsq)
1338IWMMXT_OP(madduq)
1339IWMMXT_OP(sadb)
1340IWMMXT_OP(sadw)
1341IWMMXT_OP(mulslw)
1342IWMMXT_OP(mulshw)
1343IWMMXT_OP(mululw)
1344IWMMXT_OP(muluhw)
1345IWMMXT_OP(macsw)
1346IWMMXT_OP(macuw)
1347
477955bd
PM
1348IWMMXT_OP_ENV_SIZE(unpackl)
1349IWMMXT_OP_ENV_SIZE(unpackh)
1350
1351IWMMXT_OP_ENV1(unpacklub)
1352IWMMXT_OP_ENV1(unpackluw)
1353IWMMXT_OP_ENV1(unpacklul)
1354IWMMXT_OP_ENV1(unpackhub)
1355IWMMXT_OP_ENV1(unpackhuw)
1356IWMMXT_OP_ENV1(unpackhul)
1357IWMMXT_OP_ENV1(unpacklsb)
1358IWMMXT_OP_ENV1(unpacklsw)
1359IWMMXT_OP_ENV1(unpacklsl)
1360IWMMXT_OP_ENV1(unpackhsb)
1361IWMMXT_OP_ENV1(unpackhsw)
1362IWMMXT_OP_ENV1(unpackhsl)
1363
1364IWMMXT_OP_ENV_SIZE(cmpeq)
1365IWMMXT_OP_ENV_SIZE(cmpgtu)
1366IWMMXT_OP_ENV_SIZE(cmpgts)
1367
1368IWMMXT_OP_ENV_SIZE(mins)
1369IWMMXT_OP_ENV_SIZE(minu)
1370IWMMXT_OP_ENV_SIZE(maxs)
1371IWMMXT_OP_ENV_SIZE(maxu)
1372
1373IWMMXT_OP_ENV_SIZE(subn)
1374IWMMXT_OP_ENV_SIZE(addn)
1375IWMMXT_OP_ENV_SIZE(subu)
1376IWMMXT_OP_ENV_SIZE(addu)
1377IWMMXT_OP_ENV_SIZE(subs)
1378IWMMXT_OP_ENV_SIZE(adds)
1379
1380IWMMXT_OP_ENV(avgb0)
1381IWMMXT_OP_ENV(avgb1)
1382IWMMXT_OP_ENV(avgw0)
1383IWMMXT_OP_ENV(avgw1)
e677137d 1384
477955bd
PM
1385IWMMXT_OP_ENV(packuw)
1386IWMMXT_OP_ENV(packul)
1387IWMMXT_OP_ENV(packuq)
1388IWMMXT_OP_ENV(packsw)
1389IWMMXT_OP_ENV(packsl)
1390IWMMXT_OP_ENV(packsq)
e677137d 1391
e677137d
PB
1392static void gen_op_iwmmxt_set_mup(void)
1393{
39d5492a 1394 TCGv_i32 tmp;
e677137d
PB
1395 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1396 tcg_gen_ori_i32(tmp, tmp, 2);
1397 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1398}
1399
1400static void gen_op_iwmmxt_set_cup(void)
1401{
39d5492a 1402 TCGv_i32 tmp;
e677137d
PB
1403 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1404 tcg_gen_ori_i32(tmp, tmp, 1);
1405 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1406}
1407
1408static void gen_op_iwmmxt_setpsr_nz(void)
1409{
39d5492a 1410 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1411 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1412 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1413}
1414
1415static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1416{
1417 iwmmxt_load_reg(cpu_V1, rn);
86831435 1418 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1419 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1420}
1421
39d5492a
PM
1422static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1423 TCGv_i32 dest)
18c9b560
AZ
1424{
1425 int rd;
1426 uint32_t offset;
39d5492a 1427 TCGv_i32 tmp;
18c9b560
AZ
1428
1429 rd = (insn >> 16) & 0xf;
da6b5335 1430 tmp = load_reg(s, rd);
18c9b560
AZ
1431
1432 offset = (insn & 0xff) << ((insn >> 7) & 2);
1433 if (insn & (1 << 24)) {
1434 /* Pre indexed */
1435 if (insn & (1 << 23))
da6b5335 1436 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1437 else
da6b5335
FN
1438 tcg_gen_addi_i32(tmp, tmp, -offset);
1439 tcg_gen_mov_i32(dest, tmp);
18c9b560 1440 if (insn & (1 << 21))
da6b5335
FN
1441 store_reg(s, rd, tmp);
1442 else
7d1b0095 1443 tcg_temp_free_i32(tmp);
18c9b560
AZ
1444 } else if (insn & (1 << 21)) {
1445 /* Post indexed */
da6b5335 1446 tcg_gen_mov_i32(dest, tmp);
18c9b560 1447 if (insn & (1 << 23))
da6b5335 1448 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1449 else
da6b5335
FN
1450 tcg_gen_addi_i32(tmp, tmp, -offset);
1451 store_reg(s, rd, tmp);
18c9b560
AZ
1452 } else if (!(insn & (1 << 23)))
1453 return 1;
1454 return 0;
1455}
1456
39d5492a 1457static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1458{
1459 int rd = (insn >> 0) & 0xf;
39d5492a 1460 TCGv_i32 tmp;
18c9b560 1461
da6b5335
FN
1462 if (insn & (1 << 8)) {
1463 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1464 return 1;
da6b5335
FN
1465 } else {
1466 tmp = iwmmxt_load_creg(rd);
1467 }
1468 } else {
7d1b0095 1469 tmp = tcg_temp_new_i32();
da6b5335
FN
1470 iwmmxt_load_reg(cpu_V0, rd);
1471 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1472 }
1473 tcg_gen_andi_i32(tmp, tmp, mask);
1474 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1475 tcg_temp_free_i32(tmp);
18c9b560
AZ
1476 return 0;
1477}
1478
a1c7273b 1479/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1480 (ie. an undefined instruction). */
0ecb72a5 1481static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1482{
1483 int rd, wrd;
1484 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1485 TCGv_i32 addr;
1486 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1487
1488 if ((insn & 0x0e000e00) == 0x0c000000) {
1489 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1490 wrd = insn & 0xf;
1491 rdlo = (insn >> 12) & 0xf;
1492 rdhi = (insn >> 16) & 0xf;
1493 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1494 iwmmxt_load_reg(cpu_V0, wrd);
1495 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1496 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1497 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1498 } else { /* TMCRR */
da6b5335
FN
1499 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1500 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1501 gen_op_iwmmxt_set_mup();
1502 }
1503 return 0;
1504 }
1505
1506 wrd = (insn >> 12) & 0xf;
7d1b0095 1507 addr = tcg_temp_new_i32();
da6b5335 1508 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1509 tcg_temp_free_i32(addr);
18c9b560 1510 return 1;
da6b5335 1511 }
18c9b560
AZ
1512 if (insn & ARM_CP_RW_BIT) {
1513 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1514 tmp = tcg_temp_new_i32();
6ce2faf4 1515 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1516 iwmmxt_store_creg(wrd, tmp);
18c9b560 1517 } else {
e677137d
PB
1518 i = 1;
1519 if (insn & (1 << 8)) {
1520 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1521 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1522 i = 0;
1523 } else { /* WLDRW wRd */
29531141 1524 tmp = tcg_temp_new_i32();
6ce2faf4 1525 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1526 }
1527 } else {
29531141 1528 tmp = tcg_temp_new_i32();
e677137d 1529 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1530 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1531 } else { /* WLDRB */
6ce2faf4 1532 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1533 }
1534 }
1535 if (i) {
1536 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1537 tcg_temp_free_i32(tmp);
e677137d 1538 }
18c9b560
AZ
1539 gen_op_iwmmxt_movq_wRn_M0(wrd);
1540 }
1541 } else {
1542 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1543 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1544 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1545 } else {
1546 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1547 tmp = tcg_temp_new_i32();
e677137d
PB
1548 if (insn & (1 << 8)) {
1549 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1550 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1553 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1554 }
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1558 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1561 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1562 }
1563 }
18c9b560 1564 }
29531141 1565 tcg_temp_free_i32(tmp);
18c9b560 1566 }
7d1b0095 1567 tcg_temp_free_i32(addr);
18c9b560
AZ
1568 return 0;
1569 }
1570
1571 if ((insn & 0x0f000000) != 0x0e000000)
1572 return 1;
1573
1574 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1575 case 0x000: /* WOR */
1576 wrd = (insn >> 12) & 0xf;
1577 rd0 = (insn >> 0) & 0xf;
1578 rd1 = (insn >> 16) & 0xf;
1579 gen_op_iwmmxt_movq_M0_wRn(rd0);
1580 gen_op_iwmmxt_orq_M0_wRn(rd1);
1581 gen_op_iwmmxt_setpsr_nz();
1582 gen_op_iwmmxt_movq_wRn_M0(wrd);
1583 gen_op_iwmmxt_set_mup();
1584 gen_op_iwmmxt_set_cup();
1585 break;
1586 case 0x011: /* TMCR */
1587 if (insn & 0xf)
1588 return 1;
1589 rd = (insn >> 12) & 0xf;
1590 wrd = (insn >> 16) & 0xf;
1591 switch (wrd) {
1592 case ARM_IWMMXT_wCID:
1593 case ARM_IWMMXT_wCASF:
1594 break;
1595 case ARM_IWMMXT_wCon:
1596 gen_op_iwmmxt_set_cup();
1597 /* Fall through. */
1598 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1599 tmp = iwmmxt_load_creg(wrd);
1600 tmp2 = load_reg(s, rd);
f669df27 1601 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1602 tcg_temp_free_i32(tmp2);
da6b5335 1603 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1604 break;
1605 case ARM_IWMMXT_wCGR0:
1606 case ARM_IWMMXT_wCGR1:
1607 case ARM_IWMMXT_wCGR2:
1608 case ARM_IWMMXT_wCGR3:
1609 gen_op_iwmmxt_set_cup();
da6b5335
FN
1610 tmp = load_reg(s, rd);
1611 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1612 break;
1613 default:
1614 return 1;
1615 }
1616 break;
1617 case 0x100: /* WXOR */
1618 wrd = (insn >> 12) & 0xf;
1619 rd0 = (insn >> 0) & 0xf;
1620 rd1 = (insn >> 16) & 0xf;
1621 gen_op_iwmmxt_movq_M0_wRn(rd0);
1622 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1623 gen_op_iwmmxt_setpsr_nz();
1624 gen_op_iwmmxt_movq_wRn_M0(wrd);
1625 gen_op_iwmmxt_set_mup();
1626 gen_op_iwmmxt_set_cup();
1627 break;
1628 case 0x111: /* TMRC */
1629 if (insn & 0xf)
1630 return 1;
1631 rd = (insn >> 12) & 0xf;
1632 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1633 tmp = iwmmxt_load_creg(wrd);
1634 store_reg(s, rd, tmp);
18c9b560
AZ
1635 break;
1636 case 0x300: /* WANDN */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 0) & 0xf;
1639 rd1 = (insn >> 16) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1641 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1642 gen_op_iwmmxt_andq_M0_wRn(rd1);
1643 gen_op_iwmmxt_setpsr_nz();
1644 gen_op_iwmmxt_movq_wRn_M0(wrd);
1645 gen_op_iwmmxt_set_mup();
1646 gen_op_iwmmxt_set_cup();
1647 break;
1648 case 0x200: /* WAND */
1649 wrd = (insn >> 12) & 0xf;
1650 rd0 = (insn >> 0) & 0xf;
1651 rd1 = (insn >> 16) & 0xf;
1652 gen_op_iwmmxt_movq_M0_wRn(rd0);
1653 gen_op_iwmmxt_andq_M0_wRn(rd1);
1654 gen_op_iwmmxt_setpsr_nz();
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x810: case 0xa10: /* WMADD */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 0) & 0xf;
1662 rd1 = (insn >> 16) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 if (insn & (1 << 21))
1665 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1666 else
1667 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 switch ((insn >> 22) & 3) {
1677 case 0:
1678 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1679 break;
1680 case 1:
1681 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1685 break;
1686 case 3:
1687 return 1;
1688 }
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
1698 switch ((insn >> 22) & 3) {
1699 case 0:
1700 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1701 break;
1702 case 1:
1703 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1704 break;
1705 case 2:
1706 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1707 break;
1708 case 3:
1709 return 1;
1710 }
1711 gen_op_iwmmxt_movq_wRn_M0(wrd);
1712 gen_op_iwmmxt_set_mup();
1713 gen_op_iwmmxt_set_cup();
1714 break;
1715 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1716 wrd = (insn >> 12) & 0xf;
1717 rd0 = (insn >> 16) & 0xf;
1718 rd1 = (insn >> 0) & 0xf;
1719 gen_op_iwmmxt_movq_M0_wRn(rd0);
1720 if (insn & (1 << 22))
1721 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1722 else
1723 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1724 if (!(insn & (1 << 20)))
1725 gen_op_iwmmxt_addl_M0_wRn(wrd);
1726 gen_op_iwmmxt_movq_wRn_M0(wrd);
1727 gen_op_iwmmxt_set_mup();
1728 break;
1729 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1730 wrd = (insn >> 12) & 0xf;
1731 rd0 = (insn >> 16) & 0xf;
1732 rd1 = (insn >> 0) & 0xf;
1733 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1734 if (insn & (1 << 21)) {
1735 if (insn & (1 << 20))
1736 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1737 else
1738 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1739 } else {
1740 if (insn & (1 << 20))
1741 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1742 else
1743 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1744 }
18c9b560
AZ
1745 gen_op_iwmmxt_movq_wRn_M0(wrd);
1746 gen_op_iwmmxt_set_mup();
1747 break;
1748 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1749 wrd = (insn >> 12) & 0xf;
1750 rd0 = (insn >> 16) & 0xf;
1751 rd1 = (insn >> 0) & 0xf;
1752 gen_op_iwmmxt_movq_M0_wRn(rd0);
1753 if (insn & (1 << 21))
1754 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1755 else
1756 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1757 if (!(insn & (1 << 20))) {
e677137d
PB
1758 iwmmxt_load_reg(cpu_V1, wrd);
1759 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1760 }
1761 gen_op_iwmmxt_movq_wRn_M0(wrd);
1762 gen_op_iwmmxt_set_mup();
1763 break;
1764 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1765 wrd = (insn >> 12) & 0xf;
1766 rd0 = (insn >> 16) & 0xf;
1767 rd1 = (insn >> 0) & 0xf;
1768 gen_op_iwmmxt_movq_M0_wRn(rd0);
1769 switch ((insn >> 22) & 3) {
1770 case 0:
1771 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1772 break;
1773 case 1:
1774 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1775 break;
1776 case 2:
1777 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1778 break;
1779 case 3:
1780 return 1;
1781 }
1782 gen_op_iwmmxt_movq_wRn_M0(wrd);
1783 gen_op_iwmmxt_set_mup();
1784 gen_op_iwmmxt_set_cup();
1785 break;
1786 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1787 wrd = (insn >> 12) & 0xf;
1788 rd0 = (insn >> 16) & 0xf;
1789 rd1 = (insn >> 0) & 0xf;
1790 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1791 if (insn & (1 << 22)) {
1792 if (insn & (1 << 20))
1793 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1794 else
1795 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1796 } else {
1797 if (insn & (1 << 20))
1798 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1799 else
1800 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1801 }
18c9b560
AZ
1802 gen_op_iwmmxt_movq_wRn_M0(wrd);
1803 gen_op_iwmmxt_set_mup();
1804 gen_op_iwmmxt_set_cup();
1805 break;
1806 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1807 wrd = (insn >> 12) & 0xf;
1808 rd0 = (insn >> 16) & 0xf;
1809 rd1 = (insn >> 0) & 0xf;
1810 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1811 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1812 tcg_gen_andi_i32(tmp, tmp, 7);
1813 iwmmxt_load_reg(cpu_V1, rd1);
1814 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1815 tcg_temp_free_i32(tmp);
18c9b560
AZ
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 break;
1819 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1820 if (((insn >> 6) & 3) == 3)
1821 return 1;
18c9b560
AZ
1822 rd = (insn >> 12) & 0xf;
1823 wrd = (insn >> 16) & 0xf;
da6b5335 1824 tmp = load_reg(s, rd);
18c9b560
AZ
1825 gen_op_iwmmxt_movq_M0_wRn(wrd);
1826 switch ((insn >> 6) & 3) {
1827 case 0:
da6b5335
FN
1828 tmp2 = tcg_const_i32(0xff);
1829 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1830 break;
1831 case 1:
da6b5335
FN
1832 tmp2 = tcg_const_i32(0xffff);
1833 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1834 break;
1835 case 2:
da6b5335
FN
1836 tmp2 = tcg_const_i32(0xffffffff);
1837 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1838 break;
da6b5335 1839 default:
39d5492a
PM
1840 TCGV_UNUSED_I32(tmp2);
1841 TCGV_UNUSED_I32(tmp3);
18c9b560 1842 }
da6b5335 1843 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1844 tcg_temp_free_i32(tmp3);
1845 tcg_temp_free_i32(tmp2);
7d1b0095 1846 tcg_temp_free_i32(tmp);
18c9b560
AZ
1847 gen_op_iwmmxt_movq_wRn_M0(wrd);
1848 gen_op_iwmmxt_set_mup();
1849 break;
1850 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1851 rd = (insn >> 12) & 0xf;
1852 wrd = (insn >> 16) & 0xf;
da6b5335 1853 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1854 return 1;
1855 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1856 tmp = tcg_temp_new_i32();
18c9b560
AZ
1857 switch ((insn >> 22) & 3) {
1858 case 0:
da6b5335
FN
1859 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1860 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1861 if (insn & 8) {
1862 tcg_gen_ext8s_i32(tmp, tmp);
1863 } else {
1864 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1865 }
1866 break;
1867 case 1:
da6b5335
FN
1868 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1869 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1870 if (insn & 8) {
1871 tcg_gen_ext16s_i32(tmp, tmp);
1872 } else {
1873 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1874 }
1875 break;
1876 case 2:
da6b5335
FN
1877 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1878 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1879 break;
18c9b560 1880 }
da6b5335 1881 store_reg(s, rd, tmp);
18c9b560
AZ
1882 break;
1883 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1884 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1885 return 1;
da6b5335 1886 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1887 switch ((insn >> 22) & 3) {
1888 case 0:
da6b5335 1889 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1890 break;
1891 case 1:
da6b5335 1892 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1893 break;
1894 case 2:
da6b5335 1895 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1896 break;
18c9b560 1897 }
da6b5335
FN
1898 tcg_gen_shli_i32(tmp, tmp, 28);
1899 gen_set_nzcv(tmp);
7d1b0095 1900 tcg_temp_free_i32(tmp);
18c9b560
AZ
1901 break;
1902 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1903 if (((insn >> 6) & 3) == 3)
1904 return 1;
18c9b560
AZ
1905 rd = (insn >> 12) & 0xf;
1906 wrd = (insn >> 16) & 0xf;
da6b5335 1907 tmp = load_reg(s, rd);
18c9b560
AZ
1908 switch ((insn >> 6) & 3) {
1909 case 0:
da6b5335 1910 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1911 break;
1912 case 1:
da6b5335 1913 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1914 break;
1915 case 2:
da6b5335 1916 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1917 break;
18c9b560 1918 }
7d1b0095 1919 tcg_temp_free_i32(tmp);
18c9b560
AZ
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
1923 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1924 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1925 return 1;
da6b5335 1926 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1927 tmp2 = tcg_temp_new_i32();
da6b5335 1928 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1929 switch ((insn >> 22) & 3) {
1930 case 0:
1931 for (i = 0; i < 7; i ++) {
da6b5335
FN
1932 tcg_gen_shli_i32(tmp2, tmp2, 4);
1933 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1934 }
1935 break;
1936 case 1:
1937 for (i = 0; i < 3; i ++) {
da6b5335
FN
1938 tcg_gen_shli_i32(tmp2, tmp2, 8);
1939 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1940 }
1941 break;
1942 case 2:
da6b5335
FN
1943 tcg_gen_shli_i32(tmp2, tmp2, 16);
1944 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1945 break;
18c9b560 1946 }
da6b5335 1947 gen_set_nzcv(tmp);
7d1b0095
PM
1948 tcg_temp_free_i32(tmp2);
1949 tcg_temp_free_i32(tmp);
18c9b560
AZ
1950 break;
1951 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1952 wrd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 16) & 0xf;
1954 gen_op_iwmmxt_movq_M0_wRn(rd0);
1955 switch ((insn >> 22) & 3) {
1956 case 0:
e677137d 1957 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1958 break;
1959 case 1:
e677137d 1960 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1961 break;
1962 case 2:
e677137d 1963 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1964 break;
1965 case 3:
1966 return 1;
1967 }
1968 gen_op_iwmmxt_movq_wRn_M0(wrd);
1969 gen_op_iwmmxt_set_mup();
1970 break;
1971 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1972 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1973 return 1;
da6b5335 1974 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1975 tmp2 = tcg_temp_new_i32();
da6b5335 1976 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1977 switch ((insn >> 22) & 3) {
1978 case 0:
1979 for (i = 0; i < 7; i ++) {
da6b5335
FN
1980 tcg_gen_shli_i32(tmp2, tmp2, 4);
1981 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1982 }
1983 break;
1984 case 1:
1985 for (i = 0; i < 3; i ++) {
da6b5335
FN
1986 tcg_gen_shli_i32(tmp2, tmp2, 8);
1987 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1988 }
1989 break;
1990 case 2:
da6b5335
FN
1991 tcg_gen_shli_i32(tmp2, tmp2, 16);
1992 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1993 break;
18c9b560 1994 }
da6b5335 1995 gen_set_nzcv(tmp);
7d1b0095
PM
1996 tcg_temp_free_i32(tmp2);
1997 tcg_temp_free_i32(tmp);
18c9b560
AZ
1998 break;
1999 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2000 rd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
da6b5335 2002 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2003 return 1;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2005 tmp = tcg_temp_new_i32();
18c9b560
AZ
2006 switch ((insn >> 22) & 3) {
2007 case 0:
da6b5335 2008 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2009 break;
2010 case 1:
da6b5335 2011 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2012 break;
2013 case 2:
da6b5335 2014 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2015 break;
18c9b560 2016 }
da6b5335 2017 store_reg(s, rd, tmp);
18c9b560
AZ
2018 break;
2019 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2020 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2021 wrd = (insn >> 12) & 0xf;
2022 rd0 = (insn >> 16) & 0xf;
2023 rd1 = (insn >> 0) & 0xf;
2024 gen_op_iwmmxt_movq_M0_wRn(rd0);
2025 switch ((insn >> 22) & 3) {
2026 case 0:
2027 if (insn & (1 << 21))
2028 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2029 else
2030 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2031 break;
2032 case 1:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2035 else
2036 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2037 break;
2038 case 2:
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2043 break;
2044 case 3:
2045 return 1;
2046 }
2047 gen_op_iwmmxt_movq_wRn_M0(wrd);
2048 gen_op_iwmmxt_set_mup();
2049 gen_op_iwmmxt_set_cup();
2050 break;
2051 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2052 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2053 wrd = (insn >> 12) & 0xf;
2054 rd0 = (insn >> 16) & 0xf;
2055 gen_op_iwmmxt_movq_M0_wRn(rd0);
2056 switch ((insn >> 22) & 3) {
2057 case 0:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_unpacklsb_M0();
2060 else
2061 gen_op_iwmmxt_unpacklub_M0();
2062 break;
2063 case 1:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpacklsw_M0();
2066 else
2067 gen_op_iwmmxt_unpackluw_M0();
2068 break;
2069 case 2:
2070 if (insn & (1 << 21))
2071 gen_op_iwmmxt_unpacklsl_M0();
2072 else
2073 gen_op_iwmmxt_unpacklul_M0();
2074 break;
2075 case 3:
2076 return 1;
2077 }
2078 gen_op_iwmmxt_movq_wRn_M0(wrd);
2079 gen_op_iwmmxt_set_mup();
2080 gen_op_iwmmxt_set_cup();
2081 break;
2082 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2083 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2084 wrd = (insn >> 12) & 0xf;
2085 rd0 = (insn >> 16) & 0xf;
2086 gen_op_iwmmxt_movq_M0_wRn(rd0);
2087 switch ((insn >> 22) & 3) {
2088 case 0:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_unpackhsb_M0();
2091 else
2092 gen_op_iwmmxt_unpackhub_M0();
2093 break;
2094 case 1:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_unpackhsw_M0();
2097 else
2098 gen_op_iwmmxt_unpackhuw_M0();
2099 break;
2100 case 2:
2101 if (insn & (1 << 21))
2102 gen_op_iwmmxt_unpackhsl_M0();
2103 else
2104 gen_op_iwmmxt_unpackhul_M0();
2105 break;
2106 case 3:
2107 return 1;
2108 }
2109 gen_op_iwmmxt_movq_wRn_M0(wrd);
2110 gen_op_iwmmxt_set_mup();
2111 gen_op_iwmmxt_set_cup();
2112 break;
2113 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2114 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2115 if (((insn >> 22) & 3) == 0)
2116 return 1;
18c9b560
AZ
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2120 tmp = tcg_temp_new_i32();
da6b5335 2121 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2122 tcg_temp_free_i32(tmp);
18c9b560 2123 return 1;
da6b5335 2124 }
18c9b560 2125 switch ((insn >> 22) & 3) {
18c9b560 2126 case 1:
477955bd 2127 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2128 break;
2129 case 2:
477955bd 2130 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 3:
477955bd 2133 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2134 break;
2135 }
7d1b0095 2136 tcg_temp_free_i32(tmp);
18c9b560
AZ
2137 gen_op_iwmmxt_movq_wRn_M0(wrd);
2138 gen_op_iwmmxt_set_mup();
2139 gen_op_iwmmxt_set_cup();
2140 break;
2141 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2142 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2143 if (((insn >> 22) & 3) == 0)
2144 return 1;
18c9b560
AZ
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2148 tmp = tcg_temp_new_i32();
da6b5335 2149 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2150 tcg_temp_free_i32(tmp);
18c9b560 2151 return 1;
da6b5335 2152 }
18c9b560 2153 switch ((insn >> 22) & 3) {
18c9b560 2154 case 1:
477955bd 2155 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2156 break;
2157 case 2:
477955bd 2158 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2159 break;
2160 case 3:
477955bd 2161 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2162 break;
2163 }
7d1b0095 2164 tcg_temp_free_i32(tmp);
18c9b560
AZ
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2170 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2171 if (((insn >> 22) & 3) == 0)
2172 return 1;
18c9b560
AZ
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2176 tmp = tcg_temp_new_i32();
da6b5335 2177 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2178 tcg_temp_free_i32(tmp);
18c9b560 2179 return 1;
da6b5335 2180 }
18c9b560 2181 switch ((insn >> 22) & 3) {
18c9b560 2182 case 1:
477955bd 2183 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2184 break;
2185 case 2:
477955bd 2186 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2187 break;
2188 case 3:
477955bd 2189 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2190 break;
2191 }
7d1b0095 2192 tcg_temp_free_i32(tmp);
18c9b560
AZ
2193 gen_op_iwmmxt_movq_wRn_M0(wrd);
2194 gen_op_iwmmxt_set_mup();
2195 gen_op_iwmmxt_set_cup();
2196 break;
2197 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2198 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2199 if (((insn >> 22) & 3) == 0)
2200 return 1;
18c9b560
AZ
2201 wrd = (insn >> 12) & 0xf;
2202 rd0 = (insn >> 16) & 0xf;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2204 tmp = tcg_temp_new_i32();
18c9b560 2205 switch ((insn >> 22) & 3) {
18c9b560 2206 case 1:
da6b5335 2207 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2208 tcg_temp_free_i32(tmp);
18c9b560 2209 return 1;
da6b5335 2210 }
477955bd 2211 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2212 break;
2213 case 2:
da6b5335 2214 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2215 tcg_temp_free_i32(tmp);
18c9b560 2216 return 1;
da6b5335 2217 }
477955bd 2218 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2219 break;
2220 case 3:
da6b5335 2221 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2222 tcg_temp_free_i32(tmp);
18c9b560 2223 return 1;
da6b5335 2224 }
477955bd 2225 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2226 break;
2227 }
7d1b0095 2228 tcg_temp_free_i32(tmp);
18c9b560
AZ
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2234 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2235 wrd = (insn >> 12) & 0xf;
2236 rd0 = (insn >> 16) & 0xf;
2237 rd1 = (insn >> 0) & 0xf;
2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
2239 switch ((insn >> 22) & 3) {
2240 case 0:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_minub_M0_wRn(rd1);
2245 break;
2246 case 1:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2251 break;
2252 case 2:
2253 if (insn & (1 << 21))
2254 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2255 else
2256 gen_op_iwmmxt_minul_M0_wRn(rd1);
2257 break;
2258 case 3:
2259 return 1;
2260 }
2261 gen_op_iwmmxt_movq_wRn_M0(wrd);
2262 gen_op_iwmmxt_set_mup();
2263 break;
2264 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2265 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2266 wrd = (insn >> 12) & 0xf;
2267 rd0 = (insn >> 16) & 0xf;
2268 rd1 = (insn >> 0) & 0xf;
2269 gen_op_iwmmxt_movq_M0_wRn(rd0);
2270 switch ((insn >> 22) & 3) {
2271 case 0:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2276 break;
2277 case 1:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2282 break;
2283 case 2:
2284 if (insn & (1 << 21))
2285 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2286 else
2287 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2288 break;
2289 case 3:
2290 return 1;
2291 }
2292 gen_op_iwmmxt_movq_wRn_M0(wrd);
2293 gen_op_iwmmxt_set_mup();
2294 break;
2295 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2296 case 0x402: case 0x502: case 0x602: case 0x702:
2297 wrd = (insn >> 12) & 0xf;
2298 rd0 = (insn >> 16) & 0xf;
2299 rd1 = (insn >> 0) & 0xf;
2300 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2301 tmp = tcg_const_i32((insn >> 20) & 3);
2302 iwmmxt_load_reg(cpu_V1, rd1);
2303 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2304 tcg_temp_free_i32(tmp);
18c9b560
AZ
2305 gen_op_iwmmxt_movq_wRn_M0(wrd);
2306 gen_op_iwmmxt_set_mup();
2307 break;
2308 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2309 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2310 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2311 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2312 wrd = (insn >> 12) & 0xf;
2313 rd0 = (insn >> 16) & 0xf;
2314 rd1 = (insn >> 0) & 0xf;
2315 gen_op_iwmmxt_movq_M0_wRn(rd0);
2316 switch ((insn >> 20) & 0xf) {
2317 case 0x0:
2318 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2319 break;
2320 case 0x1:
2321 gen_op_iwmmxt_subub_M0_wRn(rd1);
2322 break;
2323 case 0x3:
2324 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2325 break;
2326 case 0x4:
2327 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2328 break;
2329 case 0x5:
2330 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2331 break;
2332 case 0x7:
2333 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2334 break;
2335 case 0x8:
2336 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2337 break;
2338 case 0x9:
2339 gen_op_iwmmxt_subul_M0_wRn(rd1);
2340 break;
2341 case 0xb:
2342 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2343 break;
2344 default:
2345 return 1;
2346 }
2347 gen_op_iwmmxt_movq_wRn_M0(wrd);
2348 gen_op_iwmmxt_set_mup();
2349 gen_op_iwmmxt_set_cup();
2350 break;
2351 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2352 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2353 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2354 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2358 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2359 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2360 tcg_temp_free_i32(tmp);
18c9b560
AZ
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 gen_op_iwmmxt_set_cup();
2364 break;
2365 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2366 case 0x418: case 0x518: case 0x618: case 0x718:
2367 case 0x818: case 0x918: case 0xa18: case 0xb18:
2368 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2369 wrd = (insn >> 12) & 0xf;
2370 rd0 = (insn >> 16) & 0xf;
2371 rd1 = (insn >> 0) & 0xf;
2372 gen_op_iwmmxt_movq_M0_wRn(rd0);
2373 switch ((insn >> 20) & 0xf) {
2374 case 0x0:
2375 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2376 break;
2377 case 0x1:
2378 gen_op_iwmmxt_addub_M0_wRn(rd1);
2379 break;
2380 case 0x3:
2381 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2382 break;
2383 case 0x4:
2384 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2385 break;
2386 case 0x5:
2387 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2388 break;
2389 case 0x7:
2390 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2391 break;
2392 case 0x8:
2393 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2394 break;
2395 case 0x9:
2396 gen_op_iwmmxt_addul_M0_wRn(rd1);
2397 break;
2398 case 0xb:
2399 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2400 break;
2401 default:
2402 return 1;
2403 }
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2407 break;
2408 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2409 case 0x408: case 0x508: case 0x608: case 0x708:
2410 case 0x808: case 0x908: case 0xa08: case 0xb08:
2411 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2412 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2413 return 1;
18c9b560
AZ
2414 wrd = (insn >> 12) & 0xf;
2415 rd0 = (insn >> 16) & 0xf;
2416 rd1 = (insn >> 0) & 0xf;
2417 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2418 switch ((insn >> 22) & 3) {
18c9b560
AZ
2419 case 1:
2420 if (insn & (1 << 21))
2421 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2422 else
2423 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2424 break;
2425 case 2:
2426 if (insn & (1 << 21))
2427 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2428 else
2429 gen_op_iwmmxt_packul_M0_wRn(rd1);
2430 break;
2431 case 3:
2432 if (insn & (1 << 21))
2433 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2434 else
2435 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2436 break;
2437 }
2438 gen_op_iwmmxt_movq_wRn_M0(wrd);
2439 gen_op_iwmmxt_set_mup();
2440 gen_op_iwmmxt_set_cup();
2441 break;
2442 case 0x201: case 0x203: case 0x205: case 0x207:
2443 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2444 case 0x211: case 0x213: case 0x215: case 0x217:
2445 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2446 wrd = (insn >> 5) & 0xf;
2447 rd0 = (insn >> 12) & 0xf;
2448 rd1 = (insn >> 0) & 0xf;
2449 if (rd0 == 0xf || rd1 == 0xf)
2450 return 1;
2451 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2452 tmp = load_reg(s, rd0);
2453 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2454 switch ((insn >> 16) & 0xf) {
2455 case 0x0: /* TMIA */
da6b5335 2456 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2457 break;
2458 case 0x8: /* TMIAPH */
da6b5335 2459 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2460 break;
2461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2462 if (insn & (1 << 16))
da6b5335 2463 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2464 if (insn & (1 << 17))
da6b5335
FN
2465 tcg_gen_shri_i32(tmp2, tmp2, 16);
2466 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2467 break;
2468 default:
7d1b0095
PM
2469 tcg_temp_free_i32(tmp2);
2470 tcg_temp_free_i32(tmp);
18c9b560
AZ
2471 return 1;
2472 }
7d1b0095
PM
2473 tcg_temp_free_i32(tmp2);
2474 tcg_temp_free_i32(tmp);
18c9b560
AZ
2475 gen_op_iwmmxt_movq_wRn_M0(wrd);
2476 gen_op_iwmmxt_set_mup();
2477 break;
2478 default:
2479 return 1;
2480 }
2481
2482 return 0;
2483}
2484
a1c7273b 2485/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2486 (ie. an undefined instruction). */
0ecb72a5 2487static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2488{
2489 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2490 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2491
2492 if ((insn & 0x0ff00f10) == 0x0e200010) {
2493 /* Multiply with Internal Accumulate Format */
2494 rd0 = (insn >> 12) & 0xf;
2495 rd1 = insn & 0xf;
2496 acc = (insn >> 5) & 7;
2497
2498 if (acc != 0)
2499 return 1;
2500
3a554c0f
FN
2501 tmp = load_reg(s, rd0);
2502 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2503 switch ((insn >> 16) & 0xf) {
2504 case 0x0: /* MIA */
3a554c0f 2505 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2506 break;
2507 case 0x8: /* MIAPH */
3a554c0f 2508 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2509 break;
2510 case 0xc: /* MIABB */
2511 case 0xd: /* MIABT */
2512 case 0xe: /* MIATB */
2513 case 0xf: /* MIATT */
18c9b560 2514 if (insn & (1 << 16))
3a554c0f 2515 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2516 if (insn & (1 << 17))
3a554c0f
FN
2517 tcg_gen_shri_i32(tmp2, tmp2, 16);
2518 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2519 break;
2520 default:
2521 return 1;
2522 }
7d1b0095
PM
2523 tcg_temp_free_i32(tmp2);
2524 tcg_temp_free_i32(tmp);
18c9b560
AZ
2525
2526 gen_op_iwmmxt_movq_wRn_M0(acc);
2527 return 0;
2528 }
2529
2530 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2531 /* Internal Accumulator Access Format */
2532 rdhi = (insn >> 16) & 0xf;
2533 rdlo = (insn >> 12) & 0xf;
2534 acc = insn & 7;
2535
2536 if (acc != 0)
2537 return 1;
2538
2539 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2540 iwmmxt_load_reg(cpu_V0, acc);
2541 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2542 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2543 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2544 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2545 } else { /* MAR */
3a554c0f
FN
2546 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2547 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2548 }
2549 return 0;
2550 }
2551
2552 return 1;
2553}
2554
9ee6e8bb
PB
/* Shift x right by n bits when n is positive, left by -n when negative.
   Lets the VFP_SREG macro below use a single expression even when the
   computed shift count would be negative. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at 'bigbit'
   forms bits [4:1] and the single bit at 'smallbit' is bit 0. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  With VFP3 there
   are 32 D registers and 'smallbit' supplies bit 4 of the number; without
   VFP3 only 16 exist, so a set 'smallbit' makes the enclosing function
   return 1 (UNDEF).  Relies on 'env' and 'insn' being in scope. */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers naming the D (destination), N and M operand
   fields of a VFP instruction encoding. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2574
4373f3ce 2575/* Move between integer and VFP cores. */
39d5492a 2576static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2577{
39d5492a 2578 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2579 tcg_gen_mov_i32(tmp, cpu_F0s);
2580 return tmp;
2581}
2582
39d5492a 2583static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2584{
2585 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2586 tcg_temp_free_i32(tmp);
4373f3ce
PB
2587}
2588
39d5492a 2589static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2590{
39d5492a 2591 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2592 if (shift)
2593 tcg_gen_shri_i32(var, var, shift);
86831435 2594 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2595 tcg_gen_shli_i32(tmp, var, 8);
2596 tcg_gen_or_i32(var, var, tmp);
2597 tcg_gen_shli_i32(tmp, var, 16);
2598 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2599 tcg_temp_free_i32(tmp);
ad69471c
PB
2600}
2601
39d5492a 2602static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2603{
39d5492a 2604 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2605 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2606 tcg_gen_shli_i32(tmp, var, 16);
2607 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2608 tcg_temp_free_i32(tmp);
ad69471c
PB
2609}
2610
39d5492a 2611static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2612{
39d5492a 2613 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2614 tcg_gen_andi_i32(var, var, 0xffff0000);
2615 tcg_gen_shri_i32(tmp, var, 16);
2616 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2617 tcg_temp_free_i32(tmp);
ad69471c
PB
2618}
2619
39d5492a 2620static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2621{
2622 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2623 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2624 switch (size) {
2625 case 0:
6ce2faf4 2626 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2627 gen_neon_dup_u8(tmp, 0);
2628 break;
2629 case 1:
6ce2faf4 2630 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2631 gen_neon_dup_low16(tmp);
2632 break;
2633 case 2:
6ce2faf4 2634 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2635 break;
2636 default: /* Avoid compiler warnings. */
2637 abort();
2638 }
2639 return tmp;
2640}
2641
04731fb5
WN
/* VSEL: select rd := rn or rm depending on the ARM condition flags.
 * 'cc' encodes one of EQ/VS/GE/GT; the other conditions are obtained by
 * the caller swapping rn/rm in the encoding.  Returns 0 (never UNDEFs).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the flags so movcond can operate on i64 values: ZF is
         * compared against zero (zero-extend); NF and VF are tested via
         * their sign, so sign-extend those.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on !Z, then overwrite with frm
             * again unless N == V also holds.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: same structure, using the 32-bit flag
         * registers directly.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2750
40cfacdd
WN
2751static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2752 uint32_t rm, uint32_t dp)
2753{
2754 uint32_t vmin = extract32(insn, 6, 1);
2755 TCGv_ptr fpst = get_fpstatus_ptr(0);
2756
2757 if (dp) {
2758 TCGv_i64 frn, frm, dest;
2759
2760 frn = tcg_temp_new_i64();
2761 frm = tcg_temp_new_i64();
2762 dest = tcg_temp_new_i64();
2763
2764 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2765 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2766 if (vmin) {
f71a2ae5 2767 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2768 } else {
f71a2ae5 2769 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2770 }
2771 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2772 tcg_temp_free_i64(frn);
2773 tcg_temp_free_i64(frm);
2774 tcg_temp_free_i64(dest);
2775 } else {
2776 TCGv_i32 frn, frm, dest;
2777
2778 frn = tcg_temp_new_i32();
2779 frm = tcg_temp_new_i32();
2780 dest = tcg_temp_new_i32();
2781
2782 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2783 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2784 if (vmin) {
f71a2ae5 2785 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2786 } else {
f71a2ae5 2787 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2788 }
2789 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2790 tcg_temp_free_i32(frn);
2791 tcg_temp_free_i32(frm);
2792 tcg_temp_free_i32(dest);
2793 }
2794
2795 tcg_temp_free_ptr(fpst);
2796 return 0;
2797}
2798
7655f39b
WN
2799static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2800 int rounding)
2801{
2802 TCGv_ptr fpst = get_fpstatus_ptr(0);
2803 TCGv_i32 tcg_rmode;
2804
2805 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2806 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2807
2808 if (dp) {
2809 TCGv_i64 tcg_op;
2810 TCGv_i64 tcg_res;
2811 tcg_op = tcg_temp_new_i64();
2812 tcg_res = tcg_temp_new_i64();
2813 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2814 gen_helper_rintd(tcg_res, tcg_op, fpst);
2815 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2816 tcg_temp_free_i64(tcg_op);
2817 tcg_temp_free_i64(tcg_res);
2818 } else {
2819 TCGv_i32 tcg_op;
2820 TCGv_i32 tcg_res;
2821 tcg_op = tcg_temp_new_i32();
2822 tcg_res = tcg_temp_new_i32();
2823 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2824 gen_helper_rints(tcg_res, tcg_op, fpst);
2825 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2826 tcg_temp_free_i32(tcg_op);
2827 tcg_temp_free_i32(tcg_res);
2828 }
2829
2830 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2831 tcg_temp_free_i32(tcg_rmode);
2832
2833 tcg_temp_free_ptr(fpst);
2834 return 0;
2835}
2836
c9975a83
WN
2837static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2838 int rounding)
2839{
2840 bool is_signed = extract32(insn, 7, 1);
2841 TCGv_ptr fpst = get_fpstatus_ptr(0);
2842 TCGv_i32 tcg_rmode, tcg_shift;
2843
2844 tcg_shift = tcg_const_i32(0);
2845
2846 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2847 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2848
2849 if (dp) {
2850 TCGv_i64 tcg_double, tcg_res;
2851 TCGv_i32 tcg_tmp;
2852 /* Rd is encoded as a single precision register even when the source
2853 * is double precision.
2854 */
2855 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
2856 tcg_double = tcg_temp_new_i64();
2857 tcg_res = tcg_temp_new_i64();
2858 tcg_tmp = tcg_temp_new_i32();
2859 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
2860 if (is_signed) {
2861 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
2862 } else {
2863 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
2864 }
2865 tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
2866 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
2867 tcg_temp_free_i32(tcg_tmp);
2868 tcg_temp_free_i64(tcg_res);
2869 tcg_temp_free_i64(tcg_double);
2870 } else {
2871 TCGv_i32 tcg_single, tcg_res;
2872 tcg_single = tcg_temp_new_i32();
2873 tcg_res = tcg_temp_new_i32();
2874 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
2875 if (is_signed) {
2876 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
2877 } else {
2878 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
2879 }
2880 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
2881 tcg_temp_free_i32(tcg_res);
2882 tcg_temp_free_i32(tcg_single);
2883 }
2884
2885 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2886 tcg_temp_free_i32(tcg_rmode);
2887
2888 tcg_temp_free_i32(tcg_shift);
2889
2890 tcg_temp_free_ptr(fpst);
2891
2892 return 0;
2893}
7655f39b
WN
2894
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit 'rm' field of the instruction.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,   /* rm == 0b00 */
    FPROUNDING_TIEEVEN,   /* rm == 0b01 */
    FPROUNDING_POSINF,    /* rm == 0b10 */
    FPROUNDING_NEGINF,    /* rm == 0b11 */
};
2905
04731fb5
WN
2906static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2907{
2908 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2909
2910 if (!arm_feature(env, ARM_FEATURE_V8)) {
2911 return 1;
2912 }
2913
2914 if (dp) {
2915 VFP_DREG_D(rd, insn);
2916 VFP_DREG_N(rn, insn);
2917 VFP_DREG_M(rm, insn);
2918 } else {
2919 rd = VFP_SREG_D(insn);
2920 rn = VFP_SREG_N(insn);
2921 rm = VFP_SREG_M(insn);
2922 }
2923
2924 if ((insn & 0x0f800e50) == 0x0e000a00) {
2925 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
2926 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
2927 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
2928 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
2929 /* VRINTA, VRINTN, VRINTP, VRINTM */
2930 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2931 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
2932 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
2933 /* VCVTA, VCVTN, VCVTP, VCVTM */
2934 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
2935 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
2936 }
2937 return 1;
2938}
2939
a1c7273b 2940/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2941 (ie. an undefined instruction). */
0ecb72a5 2942static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2943{
2944 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2945 int dp, veclen;
39d5492a
PM
2946 TCGv_i32 addr;
2947 TCGv_i32 tmp;
2948 TCGv_i32 tmp2;
b7bcbe95 2949
40f137e1
PB
2950 if (!arm_feature(env, ARM_FEATURE_VFP))
2951 return 1;
2952
2c7ffc41
PM
2953 /* FIXME: this access check should not take precedence over UNDEF
2954 * for invalid encodings; we will generate incorrect syndrome information
2955 * for attempts to execute invalid vfp/neon encodings with FP disabled.
2956 */
2957 if (!s->cpacr_fpen) {
2958 gen_exception_insn(s, 4, EXCP_UDEF,
2959 syn_fp_access_trap(1, 0xe, s->thumb));
2960 return 0;
2961 }
2962
5df8bac1 2963 if (!s->vfp_enabled) {
9ee6e8bb 2964 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2965 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2966 return 1;
2967 rn = (insn >> 16) & 0xf;
a50c0f51
PM
2968 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
2969 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 2970 return 1;
a50c0f51 2971 }
40f137e1 2972 }
6a57f3eb
WN
2973
2974 if (extract32(insn, 28, 4) == 0xf) {
2975 /* Encodings with T=1 (Thumb) or unconditional (ARM):
2976 * only used in v8 and above.
2977 */
04731fb5 2978 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
2979 }
2980
b7bcbe95
FB
2981 dp = ((insn & 0xf00) == 0xb00);
2982 switch ((insn >> 24) & 0xf) {
2983 case 0xe:
2984 if (insn & (1 << 4)) {
2985 /* single register transfer */
b7bcbe95
FB
2986 rd = (insn >> 12) & 0xf;
2987 if (dp) {
9ee6e8bb
PB
2988 int size;
2989 int pass;
2990
2991 VFP_DREG_N(rn, insn);
2992 if (insn & 0xf)
b7bcbe95 2993 return 1;
9ee6e8bb
PB
2994 if (insn & 0x00c00060
2995 && !arm_feature(env, ARM_FEATURE_NEON))
2996 return 1;
2997
2998 pass = (insn >> 21) & 1;
2999 if (insn & (1 << 22)) {
3000 size = 0;
3001 offset = ((insn >> 5) & 3) * 8;
3002 } else if (insn & (1 << 5)) {
3003 size = 1;
3004 offset = (insn & (1 << 6)) ? 16 : 0;
3005 } else {
3006 size = 2;
3007 offset = 0;
3008 }
18c9b560 3009 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3010 /* vfp->arm */
ad69471c 3011 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3012 switch (size) {
3013 case 0:
9ee6e8bb 3014 if (offset)
ad69471c 3015 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3016 if (insn & (1 << 23))
ad69471c 3017 gen_uxtb(tmp);
9ee6e8bb 3018 else
ad69471c 3019 gen_sxtb(tmp);
9ee6e8bb
PB
3020 break;
3021 case 1:
9ee6e8bb
PB
3022 if (insn & (1 << 23)) {
3023 if (offset) {
ad69471c 3024 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3025 } else {
ad69471c 3026 gen_uxth(tmp);
9ee6e8bb
PB
3027 }
3028 } else {
3029 if (offset) {
ad69471c 3030 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3031 } else {
ad69471c 3032 gen_sxth(tmp);
9ee6e8bb
PB
3033 }
3034 }
3035 break;
3036 case 2:
9ee6e8bb
PB
3037 break;
3038 }
ad69471c 3039 store_reg(s, rd, tmp);
b7bcbe95
FB
3040 } else {
3041 /* arm->vfp */
ad69471c 3042 tmp = load_reg(s, rd);
9ee6e8bb
PB
3043 if (insn & (1 << 23)) {
3044 /* VDUP */
3045 if (size == 0) {
ad69471c 3046 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3047 } else if (size == 1) {
ad69471c 3048 gen_neon_dup_low16(tmp);
9ee6e8bb 3049 }
cbbccffc 3050 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3051 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3052 tcg_gen_mov_i32(tmp2, tmp);
3053 neon_store_reg(rn, n, tmp2);
3054 }
3055 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3056 } else {
3057 /* VMOV */
3058 switch (size) {
3059 case 0:
ad69471c 3060 tmp2 = neon_load_reg(rn, pass);
d593c48e 3061 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3062 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3063 break;
3064 case 1:
ad69471c 3065 tmp2 = neon_load_reg(rn, pass);
d593c48e 3066 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3067 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3068 break;
3069 case 2:
9ee6e8bb
PB
3070 break;
3071 }
ad69471c 3072 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3073 }
b7bcbe95 3074 }
9ee6e8bb
PB
3075 } else { /* !dp */
3076 if ((insn & 0x6f) != 0x00)
3077 return 1;
3078 rn = VFP_SREG_N(insn);
18c9b560 3079 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3080 /* vfp->arm */
3081 if (insn & (1 << 21)) {
3082 /* system register */
40f137e1 3083 rn >>= 1;
9ee6e8bb 3084
b7bcbe95 3085 switch (rn) {
40f137e1 3086 case ARM_VFP_FPSID:
4373f3ce 3087 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3088 VFP3 restricts all id registers to privileged
3089 accesses. */
3090 if (IS_USER(s)
3091 && arm_feature(env, ARM_FEATURE_VFP3))
3092 return 1;
4373f3ce 3093 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3094 break;
40f137e1 3095 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3096 if (IS_USER(s))
3097 return 1;
4373f3ce 3098 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3099 break;
40f137e1
PB
3100 case ARM_VFP_FPINST:
3101 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3102 /* Not present in VFP3. */
3103 if (IS_USER(s)
3104 || arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
4373f3ce 3106 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3107 break;
40f137e1 3108 case ARM_VFP_FPSCR:
601d70b9 3109 if (rd == 15) {
4373f3ce
PB
3110 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3111 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3112 } else {
7d1b0095 3113 tmp = tcg_temp_new_i32();
4373f3ce
PB
3114 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3115 }
b7bcbe95 3116 break;
a50c0f51
PM
3117 case ARM_VFP_MVFR2:
3118 if (!arm_feature(env, ARM_FEATURE_V8)) {
3119 return 1;
3120 }
3121 /* fall through */
9ee6e8bb
PB
3122 case ARM_VFP_MVFR0:
3123 case ARM_VFP_MVFR1:
3124 if (IS_USER(s)
06ed5d66 3125 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3126 return 1;
4373f3ce 3127 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3128 break;
b7bcbe95
FB
3129 default:
3130 return 1;
3131 }
3132 } else {
3133 gen_mov_F0_vreg(0, rn);
4373f3ce 3134 tmp = gen_vfp_mrs();
b7bcbe95
FB
3135 }
3136 if (rd == 15) {
b5ff1b31 3137 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3138 gen_set_nzcv(tmp);
7d1b0095 3139 tcg_temp_free_i32(tmp);
4373f3ce
PB
3140 } else {
3141 store_reg(s, rd, tmp);
3142 }
b7bcbe95
FB
3143 } else {
3144 /* arm->vfp */
b7bcbe95 3145 if (insn & (1 << 21)) {
40f137e1 3146 rn >>= 1;
b7bcbe95
FB
3147 /* system register */
3148 switch (rn) {
40f137e1 3149 case ARM_VFP_FPSID:
9ee6e8bb
PB
3150 case ARM_VFP_MVFR0:
3151 case ARM_VFP_MVFR1:
b7bcbe95
FB
3152 /* Writes are ignored. */
3153 break;
40f137e1 3154 case ARM_VFP_FPSCR:
e4c1cfa5 3155 tmp = load_reg(s, rd);
4373f3ce 3156 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3157 tcg_temp_free_i32(tmp);
b5ff1b31 3158 gen_lookup_tb(s);
b7bcbe95 3159 break;
40f137e1 3160 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3161 if (IS_USER(s))
3162 return 1;
71b3c3de
JR
3163 /* TODO: VFP subarchitecture support.
3164 * For now, keep the EN bit only */
e4c1cfa5 3165 tmp = load_reg(s, rd);
71b3c3de 3166 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3167 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3168 gen_lookup_tb(s);
3169 break;
3170 case ARM_VFP_FPINST:
3171 case ARM_VFP_FPINST2:
e4c1cfa5 3172 tmp = load_reg(s, rd);
4373f3ce 3173 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3174 break;
b7bcbe95
FB
3175 default:
3176 return 1;
3177 }
3178 } else {
e4c1cfa5 3179 tmp = load_reg(s, rd);
4373f3ce 3180 gen_vfp_msr(tmp);
b7bcbe95
FB
3181 gen_mov_vreg_F0(0, rn);
3182 }
3183 }
3184 }
3185 } else {
3186 /* data processing */
3187 /* The opcode is in bits 23, 21, 20 and 6. */
3188 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3189 if (dp) {
3190 if (op == 15) {
3191 /* rn is opcode */
3192 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3193 } else {
3194 /* rn is register number */
9ee6e8bb 3195 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3196 }
3197
239c20c7
WN
3198 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3199 ((rn & 0x1e) == 0x6))) {
3200 /* Integer or single/half precision destination. */
9ee6e8bb 3201 rd = VFP_SREG_D(insn);
b7bcbe95 3202 } else {
9ee6e8bb 3203 VFP_DREG_D(rd, insn);
b7bcbe95 3204 }
04595bf6 3205 if (op == 15 &&
239c20c7
WN
3206 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3207 ((rn & 0x1e) == 0x4))) {
3208 /* VCVT from int or half precision is always from S reg
3209 * regardless of dp bit. VCVT with immediate frac_bits
3210 * has same format as SREG_M.
04595bf6
PM
3211 */
3212 rm = VFP_SREG_M(insn);
b7bcbe95 3213 } else {
9ee6e8bb 3214 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3215 }
3216 } else {
9ee6e8bb 3217 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3218 if (op == 15 && rn == 15) {
3219 /* Double precision destination. */
9ee6e8bb
PB
3220 VFP_DREG_D(rd, insn);
3221 } else {
3222 rd = VFP_SREG_D(insn);
3223 }
04595bf6
PM
3224 /* NB that we implicitly rely on the encoding for the frac_bits
3225 * in VCVT of fixed to float being the same as that of an SREG_M
3226 */
9ee6e8bb 3227 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3228 }
3229
69d1fc22 3230 veclen = s->vec_len;
b7bcbe95
FB
3231 if (op == 15 && rn > 3)
3232 veclen = 0;
3233
3234 /* Shut up compiler warnings. */
3235 delta_m = 0;
3236 delta_d = 0;
3237 bank_mask = 0;
3b46e624 3238
b7bcbe95
FB
3239 if (veclen > 0) {
3240 if (dp)
3241 bank_mask = 0xc;
3242 else
3243 bank_mask = 0x18;
3244
3245 /* Figure out what type of vector operation this is. */
3246 if ((rd & bank_mask) == 0) {
3247 /* scalar */
3248 veclen = 0;
3249 } else {
3250 if (dp)
69d1fc22 3251 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3252 else
69d1fc22 3253 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3254
3255 if ((rm & bank_mask) == 0) {
3256 /* mixed scalar/vector */
3257 delta_m = 0;
3258 } else {
3259 /* vector */
3260 delta_m = delta_d;
3261 }
3262 }
3263 }
3264
3265 /* Load the initial operands. */
3266 if (op == 15) {
3267 switch (rn) {
3268 case 16:
3269 case 17:
3270 /* Integer source */
3271 gen_mov_F0_vreg(0, rm);
3272 break;
3273 case 8:
3274 case 9:
3275 /* Compare */
3276 gen_mov_F0_vreg(dp, rd);
3277 gen_mov_F1_vreg(dp, rm);
3278 break;
3279 case 10:
3280 case 11:
3281 /* Compare with zero */
3282 gen_mov_F0_vreg(dp, rd);
3283 gen_vfp_F1_ld0(dp);
3284 break;
9ee6e8bb
PB
3285 case 20:
3286 case 21:
3287 case 22:
3288 case 23:
644ad806
PB
3289 case 28:
3290 case 29:
3291 case 30:
3292 case 31:
9ee6e8bb
PB
3293 /* Source and destination the same. */
3294 gen_mov_F0_vreg(dp, rd);
3295 break;
6e0c0ed1
PM
3296 case 4:
3297 case 5:
3298 case 6:
3299 case 7:
239c20c7
WN
3300 /* VCVTB, VCVTT: only present with the halfprec extension
3301 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3302 * (we choose to UNDEF)
6e0c0ed1 3303 */
239c20c7
WN
3304 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3305 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3306 return 1;
3307 }
239c20c7
WN
3308 if (!extract32(rn, 1, 1)) {
3309 /* Half precision source. */
3310 gen_mov_F0_vreg(0, rm);
3311 break;
3312 }
6e0c0ed1 3313 /* Otherwise fall through */
b7bcbe95
FB
3314 default:
3315 /* One source operand. */
3316 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3317 break;
b7bcbe95
FB
3318 }
3319 } else {
3320 /* Two source operands. */
3321 gen_mov_F0_vreg(dp, rn);
3322 gen_mov_F1_vreg(dp, rm);
3323 }
3324
3325 for (;;) {
3326 /* Perform the calculation. */
3327 switch (op) {
605a6aed
PM
3328 case 0: /* VMLA: fd + (fn * fm) */
3329 /* Note that order of inputs to the add matters for NaNs */
3330 gen_vfp_F1_mul(dp);
3331 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3332 gen_vfp_add(dp);
3333 break;
605a6aed 3334 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3335 gen_vfp_mul(dp);
605a6aed
PM
3336 gen_vfp_F1_neg(dp);
3337 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3338 gen_vfp_add(dp);
3339 break;
605a6aed
PM
3340 case 2: /* VNMLS: -fd + (fn * fm) */
3341 /* Note that it isn't valid to replace (-A + B) with (B - A)
3342 * or similar plausible looking simplifications
3343 * because this will give wrong results for NaNs.
3344 */
3345 gen_vfp_F1_mul(dp);
3346 gen_mov_F0_vreg(dp, rd);
3347 gen_vfp_neg(dp);
3348 gen_vfp_add(dp);
b7bcbe95 3349 break;
605a6aed 3350 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3351 gen_vfp_mul(dp);
605a6aed
PM
3352 gen_vfp_F1_neg(dp);
3353 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3354 gen_vfp_neg(dp);
605a6aed 3355 gen_vfp_add(dp);
b7bcbe95
FB
3356 break;
3357 case 4: /* mul: fn * fm */
3358 gen_vfp_mul(dp);
3359 break;
3360 case 5: /* nmul: -(fn * fm) */
3361 gen_vfp_mul(dp);
3362 gen_vfp_neg(dp);
3363 break;
3364 case 6: /* add: fn + fm */
3365 gen_vfp_add(dp);
3366 break;
3367 case 7: /* sub: fn - fm */
3368 gen_vfp_sub(dp);
3369 break;
3370 case 8: /* div: fn / fm */
3371 gen_vfp_div(dp);
3372 break;
da97f52c
PM
3373 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3374 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3375 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3376 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3377 /* These are fused multiply-add, and must be done as one
3378 * floating point operation with no rounding between the
3379 * multiplication and addition steps.
3380 * NB that doing the negations here as separate steps is
3381 * correct : an input NaN should come out with its sign bit
3382 * flipped if it is a negated-input.
3383 */
3384 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3385 return 1;
3386 }
3387 if (dp) {
3388 TCGv_ptr fpst;
3389 TCGv_i64 frd;
3390 if (op & 1) {
3391 /* VFNMS, VFMS */
3392 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3393 }
3394 frd = tcg_temp_new_i64();
3395 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3396 if (op & 2) {
3397 /* VFNMA, VFNMS */
3398 gen_helper_vfp_negd(frd, frd);
3399 }
3400 fpst = get_fpstatus_ptr(0);
3401 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3402 cpu_F1d, frd, fpst);
3403 tcg_temp_free_ptr(fpst);
3404 tcg_temp_free_i64(frd);
3405 } else {
3406 TCGv_ptr fpst;
3407 TCGv_i32 frd;
3408 if (op & 1) {
3409 /* VFNMS, VFMS */
3410 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3411 }
3412 frd = tcg_temp_new_i32();
3413 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3414 if (op & 2) {
3415 gen_helper_vfp_negs(frd, frd);
3416 }
3417 fpst = get_fpstatus_ptr(0);
3418 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3419 cpu_F1s, frd, fpst);
3420 tcg_temp_free_ptr(fpst);
3421 tcg_temp_free_i32(frd);
3422 }
3423 break;
9ee6e8bb
PB
3424 case 14: /* fconst */
3425 if (!arm_feature(env, ARM_FEATURE_VFP3))
3426 return 1;
3427
3428 n = (insn << 12) & 0x80000000;
3429 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3430 if (dp) {
3431 if (i & 0x40)
3432 i |= 0x3f80;
3433 else
3434 i |= 0x4000;
3435 n |= i << 16;
4373f3ce 3436 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3437 } else {
3438 if (i & 0x40)
3439 i |= 0x780;
3440 else
3441 i |= 0x800;
3442 n |= i << 19;
5b340b51 3443 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3444 }
9ee6e8bb 3445 break;
b7bcbe95
FB
3446 case 15: /* extension space */
3447 switch (rn) {
3448 case 0: /* cpy */
3449 /* no-op */
3450 break;
3451 case 1: /* abs */
3452 gen_vfp_abs(dp);
3453 break;
3454 case 2: /* neg */
3455 gen_vfp_neg(dp);
3456 break;
3457 case 3: /* sqrt */
3458 gen_vfp_sqrt(dp);
3459 break;
239c20c7 3460 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3461 tmp = gen_vfp_mrs();
3462 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3463 if (dp) {
3464 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3465 cpu_env);
3466 } else {
3467 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3468 cpu_env);
3469 }
7d1b0095 3470 tcg_temp_free_i32(tmp);
60011498 3471 break;
239c20c7 3472 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3473 tmp = gen_vfp_mrs();
3474 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3475 if (dp) {
3476 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3477 cpu_env);
3478 } else {
3479 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3480 cpu_env);
3481 }
7d1b0095 3482 tcg_temp_free_i32(tmp);
60011498 3483 break;
239c20c7 3484 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3485 tmp = tcg_temp_new_i32();
239c20c7
WN
3486 if (dp) {
3487 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3488 cpu_env);
3489 } else {
3490 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3491 cpu_env);
3492 }
60011498
PB
3493 gen_mov_F0_vreg(0, rd);
3494 tmp2 = gen_vfp_mrs();
3495 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3496 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3497 tcg_temp_free_i32(tmp2);
60011498
PB
3498 gen_vfp_msr(tmp);
3499 break;
239c20c7 3500 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3501 tmp = tcg_temp_new_i32();
239c20c7
WN
3502 if (dp) {
3503 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3504 cpu_env);
3505 } else {
3506 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3507 cpu_env);
3508 }
60011498
PB
3509 tcg_gen_shli_i32(tmp, tmp, 16);
3510 gen_mov_F0_vreg(0, rd);
3511 tmp2 = gen_vfp_mrs();
3512 tcg_gen_ext16u_i32(tmp2, tmp2);
3513 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3514 tcg_temp_free_i32(tmp2);
60011498
PB
3515 gen_vfp_msr(tmp);
3516 break;
b7bcbe95
FB
3517 case 8: /* cmp */
3518 gen_vfp_cmp(dp);
3519 break;
3520 case 9: /* cmpe */
3521 gen_vfp_cmpe(dp);
3522 break;
3523 case 10: /* cmpz */
3524 gen_vfp_cmp(dp);
3525 break;
3526 case 11: /* cmpez */
3527 gen_vfp_F1_ld0(dp);
3528 gen_vfp_cmpe(dp);
3529 break;
664c6733
WN
3530 case 12: /* vrintr */
3531 {
3532 TCGv_ptr fpst = get_fpstatus_ptr(0);
3533 if (dp) {
3534 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3535 } else {
3536 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3537 }
3538 tcg_temp_free_ptr(fpst);
3539 break;
3540 }
a290c62a
WN
3541 case 13: /* vrintz */
3542 {
3543 TCGv_ptr fpst = get_fpstatus_ptr(0);
3544 TCGv_i32 tcg_rmode;
3545 tcg_rmode = tcg_const_i32(float_round_to_zero);
3546 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3547 if (dp) {
3548 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3549 } else {
3550 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3551 }
3552 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3553 tcg_temp_free_i32(tcg_rmode);
3554 tcg_temp_free_ptr(fpst);
3555 break;
3556 }
4e82bc01
WN
3557 case 14: /* vrintx */
3558 {
3559 TCGv_ptr fpst = get_fpstatus_ptr(0);
3560 if (dp) {
3561 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3562 } else {
3563 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3564 }
3565 tcg_temp_free_ptr(fpst);
3566 break;
3567 }
b7bcbe95
FB
3568 case 15: /* single<->double conversion */
3569 if (dp)
4373f3ce 3570 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3571 else
4373f3ce 3572 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3573 break;
3574 case 16: /* fuito */
5500b06c 3575 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3576 break;
3577 case 17: /* fsito */
5500b06c 3578 gen_vfp_sito(dp, 0);
b7bcbe95 3579 break;
9ee6e8bb
PB
3580 case 20: /* fshto */
3581 if (!arm_feature(env, ARM_FEATURE_VFP3))
3582 return 1;
5500b06c 3583 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3584 break;
3585 case 21: /* fslto */
3586 if (!arm_feature(env, ARM_FEATURE_VFP3))
3587 return 1;
5500b06c 3588 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3589 break;
3590 case 22: /* fuhto */
3591 if (!arm_feature(env, ARM_FEATURE_VFP3))
3592 return 1;
5500b06c 3593 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3594 break;
3595 case 23: /* fulto */
3596 if (!arm_feature(env, ARM_FEATURE_VFP3))
3597 return 1;
5500b06c 3598 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3599 break;
b7bcbe95 3600 case 24: /* ftoui */
5500b06c 3601 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3602 break;
3603 case 25: /* ftouiz */
5500b06c 3604 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3605 break;
3606 case 26: /* ftosi */
5500b06c 3607 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3608 break;
3609 case 27: /* ftosiz */
5500b06c 3610 gen_vfp_tosiz(dp, 0);
b7bcbe95 3611 break;
9ee6e8bb
PB
3612 case 28: /* ftosh */
3613 if (!arm_feature(env, ARM_FEATURE_VFP3))
3614 return 1;
5500b06c 3615 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3616 break;
3617 case 29: /* ftosl */
3618 if (!arm_feature(env, ARM_FEATURE_VFP3))
3619 return 1;
5500b06c 3620 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3621 break;
3622 case 30: /* ftouh */
3623 if (!arm_feature(env, ARM_FEATURE_VFP3))
3624 return 1;
5500b06c 3625 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3626 break;
3627 case 31: /* ftoul */
3628 if (!arm_feature(env, ARM_FEATURE_VFP3))
3629 return 1;
5500b06c 3630 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3631 break;
b7bcbe95 3632 default: /* undefined */
b7bcbe95
FB
3633 return 1;
3634 }
3635 break;
3636 default: /* undefined */
b7bcbe95
FB
3637 return 1;
3638 }
3639
3640 /* Write back the result. */
239c20c7
WN
3641 if (op == 15 && (rn >= 8 && rn <= 11)) {
3642 /* Comparison, do nothing. */
3643 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3644 (rn & 0x1e) == 0x6)) {
3645 /* VCVT double to int: always integer result.
3646 * VCVT double to half precision is always a single
3647 * precision result.
3648 */
b7bcbe95 3649 gen_mov_vreg_F0(0, rd);
239c20c7 3650 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3651 /* conversion */
3652 gen_mov_vreg_F0(!dp, rd);
239c20c7 3653 } else {
b7bcbe95 3654 gen_mov_vreg_F0(dp, rd);
239c20c7 3655 }
b7bcbe95
FB
3656
3657 /* break out of the loop if we have finished */
3658 if (veclen == 0)
3659 break;
3660
3661 if (op == 15 && delta_m == 0) {
3662 /* single source one-many */
3663 while (veclen--) {
3664 rd = ((rd + delta_d) & (bank_mask - 1))
3665 | (rd & bank_mask);
3666 gen_mov_vreg_F0(dp, rd);
3667 }
3668 break;
3669 }
3670 /* Setup the next operands. */
3671 veclen--;
3672 rd = ((rd + delta_d) & (bank_mask - 1))
3673 | (rd & bank_mask);
3674
3675 if (op == 15) {
3676 /* One source operand. */
3677 rm = ((rm + delta_m) & (bank_mask - 1))
3678 | (rm & bank_mask);
3679 gen_mov_F0_vreg(dp, rm);
3680 } else {
3681 /* Two source operands. */
3682 rn = ((rn + delta_d) & (bank_mask - 1))
3683 | (rn & bank_mask);
3684 gen_mov_F0_vreg(dp, rn);
3685 if (delta_m) {
3686 rm = ((rm + delta_m) & (bank_mask - 1))
3687 | (rm & bank_mask);
3688 gen_mov_F1_vreg(dp, rm);
3689 }
3690 }
3691 }
3692 }
3693 break;
3694 case 0xc:
3695 case 0xd:
8387da81 3696 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3697 /* two-register transfer */
3698 rn = (insn >> 16) & 0xf;
3699 rd = (insn >> 12) & 0xf;
3700 if (dp) {
9ee6e8bb
PB
3701 VFP_DREG_M(rm, insn);
3702 } else {
3703 rm = VFP_SREG_M(insn);
3704 }
b7bcbe95 3705
18c9b560 3706 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3707 /* vfp->arm */
3708 if (dp) {
4373f3ce
PB
3709 gen_mov_F0_vreg(0, rm * 2);
3710 tmp = gen_vfp_mrs();
3711 store_reg(s, rd, tmp);
3712 gen_mov_F0_vreg(0, rm * 2 + 1);
3713 tmp = gen_vfp_mrs();
3714 store_reg(s, rn, tmp);
b7bcbe95
FB
3715 } else {
3716 gen_mov_F0_vreg(0, rm);
4373f3ce 3717 tmp = gen_vfp_mrs();
8387da81 3718 store_reg(s, rd, tmp);
b7bcbe95 3719 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3720 tmp = gen_vfp_mrs();
8387da81 3721 store_reg(s, rn, tmp);
b7bcbe95
FB
3722 }
3723 } else {
3724 /* arm->vfp */
3725 if (dp) {
4373f3ce
PB
3726 tmp = load_reg(s, rd);
3727 gen_vfp_msr(tmp);
3728 gen_mov_vreg_F0(0, rm * 2);
3729 tmp = load_reg(s, rn);
3730 gen_vfp_msr(tmp);
3731 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3732 } else {
8387da81 3733 tmp = load_reg(s, rd);
4373f3ce 3734 gen_vfp_msr(tmp);
b7bcbe95 3735 gen_mov_vreg_F0(0, rm);
8387da81 3736 tmp = load_reg(s, rn);
4373f3ce 3737 gen_vfp_msr(tmp);
b7bcbe95
FB
3738 gen_mov_vreg_F0(0, rm + 1);
3739 }
3740 }
3741 } else {
3742 /* Load/store */
3743 rn = (insn >> 16) & 0xf;
3744 if (dp)
9ee6e8bb 3745 VFP_DREG_D(rd, insn);
b7bcbe95 3746 else
9ee6e8bb 3747 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3748 if ((insn & 0x01200000) == 0x01000000) {
3749 /* Single load/store */
3750 offset = (insn & 0xff) << 2;
3751 if ((insn & (1 << 23)) == 0)
3752 offset = -offset;
934814f1
PM
3753 if (s->thumb && rn == 15) {
3754 /* This is actually UNPREDICTABLE */
3755 addr = tcg_temp_new_i32();
3756 tcg_gen_movi_i32(addr, s->pc & ~2);
3757 } else {
3758 addr = load_reg(s, rn);
3759 }
312eea9f 3760 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3761 if (insn & (1 << 20)) {
312eea9f 3762 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3763 gen_mov_vreg_F0(dp, rd);
3764 } else {
3765 gen_mov_F0_vreg(dp, rd);
312eea9f 3766 gen_vfp_st(s, dp, addr);
b7bcbe95 3767 }
7d1b0095 3768 tcg_temp_free_i32(addr);
b7bcbe95
FB
3769 } else {
3770 /* load/store multiple */
934814f1 3771 int w = insn & (1 << 21);
b7bcbe95
FB
3772 if (dp)
3773 n = (insn >> 1) & 0x7f;
3774 else
3775 n = insn & 0xff;
3776
934814f1
PM
3777 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3778 /* P == U , W == 1 => UNDEF */
3779 return 1;
3780 }
3781 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3782 /* UNPREDICTABLE cases for bad immediates: we choose to
3783 * UNDEF to avoid generating huge numbers of TCG ops
3784 */
3785 return 1;
3786 }
3787 if (rn == 15 && w) {
3788 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3789 return 1;
3790 }
3791
3792 if (s->thumb && rn == 15) {
3793 /* This is actually UNPREDICTABLE */
3794 addr = tcg_temp_new_i32();
3795 tcg_gen_movi_i32(addr, s->pc & ~2);
3796 } else {
3797 addr = load_reg(s, rn);
3798 }
b7bcbe95 3799 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3800 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3801
3802 if (dp)
3803 offset = 8;
3804 else
3805 offset = 4;
3806 for (i = 0; i < n; i++) {
18c9b560 3807 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3808 /* load */
312eea9f 3809 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3810 gen_mov_vreg_F0(dp, rd + i);
3811 } else {
3812 /* store */
3813 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3814 gen_vfp_st(s, dp, addr);
b7bcbe95 3815 }
312eea9f 3816 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3817 }
934814f1 3818 if (w) {
b7bcbe95
FB
3819 /* writeback */
3820 if (insn & (1 << 24))
3821 offset = -offset * n;
3822 else if (dp && (insn & 1))
3823 offset = 4;
3824 else
3825 offset = 0;
3826
3827 if (offset != 0)
312eea9f
FN
3828 tcg_gen_addi_i32(addr, addr, offset);
3829 store_reg(s, rn, addr);
3830 } else {
7d1b0095 3831 tcg_temp_free_i32(addr);
b7bcbe95
FB
3832 }
3833 }
3834 }
3835 break;
3836 default:
3837 /* Should never happen. */
3838 return 1;
3839 }
3840 return 0;
3841}
3842
0a2461fa 3843static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3844{
6e256c93
FB
3845 TranslationBlock *tb;
3846
3847 tb = s->tb;
3848 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3849 tcg_gen_goto_tb(n);
eaed129d 3850 gen_set_pc_im(s, dest);
8cfd0495 3851 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3852 } else {
eaed129d 3853 gen_set_pc_im(s, dest);
57fec1fe 3854 tcg_gen_exit_tb(0);
6e256c93 3855 }
c53be334
FB
3856}
3857
8aaca4c0
FB
3858static inline void gen_jmp (DisasContext *s, uint32_t dest)
3859{
551bd27f 3860 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3861 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3862 if (s->thumb)
d9ba4830
PB
3863 dest |= 1;
3864 gen_bx_im(s, dest);
8aaca4c0 3865 } else {
6e256c93 3866 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3867 s->is_jmp = DISAS_TB_JUMP;
3868 }
3869}
3870
39d5492a 3871static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3872{
ee097184 3873 if (x)
d9ba4830 3874 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3875 else
d9ba4830 3876 gen_sxth(t0);
ee097184 3877 if (y)
d9ba4830 3878 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3879 else
d9ba4830
PB
3880 gen_sxth(t1);
3881 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3882}
3883
3884/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3885static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3886 uint32_t mask;
3887
3888 mask = 0;
3889 if (flags & (1 << 0))
3890 mask |= 0xff;
3891 if (flags & (1 << 1))
3892 mask |= 0xff00;
3893 if (flags & (1 << 2))
3894 mask |= 0xff0000;
3895 if (flags & (1 << 3))
3896 mask |= 0xff000000;
9ee6e8bb 3897
2ae23e75 3898 /* Mask out undefined bits. */
9ee6e8bb 3899 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3900 if (!arm_feature(env, ARM_FEATURE_V4T))
3901 mask &= ~CPSR_T;
3902 if (!arm_feature(env, ARM_FEATURE_V5))
3903 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3904 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3905 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3906 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3907 mask &= ~CPSR_IT;
9ee6e8bb 3908 /* Mask out execution state bits. */
2ae23e75 3909 if (!spsr)
e160c51c 3910 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3911 /* Mask out privileged bits. */
3912 if (IS_USER(s))
9ee6e8bb 3913 mask &= CPSR_USER;
b5ff1b31
FB
3914 return mask;
3915}
3916
2fbac54b 3917/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3918static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3919{
39d5492a 3920 TCGv_i32 tmp;
b5ff1b31
FB
3921 if (spsr) {
3922 /* ??? This is also undefined in system mode. */
3923 if (IS_USER(s))
3924 return 1;
d9ba4830
PB
3925
3926 tmp = load_cpu_field(spsr);
3927 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3928 tcg_gen_andi_i32(t0, t0, mask);
3929 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3930 store_cpu_field(tmp, spsr);
b5ff1b31 3931 } else {
2fbac54b 3932 gen_set_cpsr(t0, mask);
b5ff1b31 3933 }
7d1b0095 3934 tcg_temp_free_i32(t0);
b5ff1b31
FB
3935 gen_lookup_tb(s);
3936 return 0;
3937}
3938
2fbac54b
FN
3939/* Returns nonzero if access to the PSR is not permitted. */
3940static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3941{
39d5492a 3942 TCGv_i32 tmp;
7d1b0095 3943 tmp = tcg_temp_new_i32();
2fbac54b
FN
3944 tcg_gen_movi_i32(tmp, val);
3945 return gen_set_psr(s, mask, spsr, tmp);
3946}
3947
e9bb4aa9 3948/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3949static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3950{
39d5492a 3951 TCGv_i32 tmp;
e9bb4aa9 3952 store_reg(s, 15, pc);
d9ba4830
PB
3953 tmp = load_cpu_field(spsr);
3954 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3955 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3956 s->is_jmp = DISAS_UPDATE;
3957}
3958
/* Generate a v6 exception return (RFE-style): install CPSR, then write
 * the new PC.  Marks both values as dead.  Ends the TB via DISAS_UPDATE
 * since the CPU mode may have changed.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3967
9ee6e8bb
PB
3968static void gen_nop_hint(DisasContext *s, int val)
3969{
3970 switch (val) {
3971 case 3: /* wfi */
eaed129d 3972 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3973 s->is_jmp = DISAS_WFI;
3974 break;
3975 case 2: /* wfe */
72c1d3af
PM
3976 gen_set_pc_im(s, s->pc);
3977 s->is_jmp = DISAS_WFE;
3978 break;
9ee6e8bb 3979 case 4: /* sev */
12b10571
MR
3980 case 5: /* sevl */
3981 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3982 default: /* nop */
3983 break;
3984 }
3985}
99c475ab 3986
ad69471c 3987#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3988
39d5492a 3989static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3990{
3991 switch (size) {
dd8fbd78
FN
3992 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3993 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3994 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3995 default: abort();
9ee6e8bb 3996 }
9ee6e8bb
PB
3997}
3998
39d5492a 3999static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4000{
4001 switch (size) {
dd8fbd78
FN
4002 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4003 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4004 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4005 default: return;
4006 }
4007}
4008
/* 32-bit pairwise ops end up the same as the elementwise versions, so the
 * pairwise 32-bit helpers are simple aliases of the min/max helpers.
 */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4014
/* Dispatch a neon integer helper that takes cpu_env, selecting the
 * signed/unsigned 8/16/32-bit variant from the surrounding `size` and `u`
 * locals; operands are in tmp/tmp2, result lands in tmp.
 * Expands to `return 1` (UNDEF the insn) for the invalid size==3 cases.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4037
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not need cpu_env:
 * dispatch on ((size << 1) | u) to the signed/unsigned 8/16/32-bit variant,
 * operands in tmp/tmp2, result in tmp; `return 1` (UNDEF) for size==3.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4060
39d5492a 4061static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4062{
39d5492a 4063 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4064 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4065 return tmp;
9ee6e8bb
PB
4066}
4067
/* Store VAR into neon scratch register SCRATCH in CPU state.
 * Consumes (frees) VAR.
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4073
39d5492a 4074static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4075{
39d5492a 4076 TCGv_i32 tmp;
9ee6e8bb 4077 if (size == 1) {
0fad6efc
PM
4078 tmp = neon_load_reg(reg & 7, reg >> 4);
4079 if (reg & 8) {
dd8fbd78 4080 gen_neon_dup_high16(tmp);
0fad6efc
PM
4081 } else {
4082 gen_neon_dup_low16(tmp);
dd8fbd78 4083 }
0fad6efc
PM
4084 } else {
4085 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4086 }
dd8fbd78 4087 return tmp;
9ee6e8bb
PB
4088}
4089
02acedf9 4090static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4091{
39d5492a 4092 TCGv_i32 tmp, tmp2;
600b828c 4093 if (!q && size == 2) {
02acedf9
PM
4094 return 1;
4095 }
4096 tmp = tcg_const_i32(rd);
4097 tmp2 = tcg_const_i32(rm);
4098 if (q) {
4099 switch (size) {
4100 case 0:
02da0b2d 4101 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4102 break;
4103 case 1:
02da0b2d 4104 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4105 break;
4106 case 2:
02da0b2d 4107 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4108 break;
4109 default:
4110 abort();
4111 }
4112 } else {
4113 switch (size) {
4114 case 0:
02da0b2d 4115 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4116 break;
4117 case 1:
02da0b2d 4118 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4119 break;
4120 default:
4121 abort();
4122 }
4123 }
4124 tcg_temp_free_i32(tmp);
4125 tcg_temp_free_i32(tmp2);
4126 return 0;
19457615
FN
4127}
4128
d68a6f3a 4129static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4130{
39d5492a 4131 TCGv_i32 tmp, tmp2;
600b828c 4132 if (!q && size == 2) {
d68a6f3a
PM
4133 return 1;
4134 }
4135 tmp = tcg_const_i32(rd);
4136 tmp2 = tcg_const_i32(rm);
4137 if (q) {
4138 switch (size) {
4139 case 0:
02da0b2d 4140 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4141 break;
4142 case 1:
02da0b2d 4143 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4144 break;
4145 case 2:
02da0b2d 4146 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4147 break;
4148 default:
4149 abort();
4150 }
4151 } else {
4152 switch (size) {
4153 case 0:
02da0b2d 4154 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4155 break;
4156 case 1:
02da0b2d 4157 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4158 break;
4159 default:
4160 abort();
4161 }
4162 }
4163 tcg_temp_free_i32(tmp);
4164 tcg_temp_free_i32(tmp2);
4165 return 0;
19457615
FN
4166}
4167
39d5492a 4168static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4169{
39d5492a 4170 TCGv_i32 rd, tmp;
19457615 4171
7d1b0095
PM
4172 rd = tcg_temp_new_i32();
4173 tmp = tcg_temp_new_i32();
19457615
FN
4174
4175 tcg_gen_shli_i32(rd, t0, 8);
4176 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4177 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4178 tcg_gen_or_i32(rd, rd, tmp);
4179
4180 tcg_gen_shri_i32(t1, t1, 8);
4181 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4182 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4183 tcg_gen_or_i32(t1, t1, tmp);
4184 tcg_gen_mov_i32(t0, rd);
4185
7d1b0095
PM
4186 tcg_temp_free_i32(tmp);
4187 tcg_temp_free_i32(rd);
19457615
FN
4188}
4189
39d5492a 4190static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4191{
39d5492a 4192 TCGv_i32 rd, tmp;
19457615 4193
7d1b0095
PM
4194 rd = tcg_temp_new_i32();
4195 tmp = tcg_temp_new_i32();
19457615
FN
4196
4197 tcg_gen_shli_i32(rd, t0, 16);
4198 tcg_gen_andi_i32(tmp, t1, 0xffff);
4199 tcg_gen_or_i32(rd, rd, tmp);
4200 tcg_gen_shri_i32(t1, t1, 16);
4201 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4202 tcg_gen_or_i32(t1, t1, tmp);
4203 tcg_gen_mov_i32(t0, rd);
4204
7d1b0095
PM
4205 tcg_temp_free_i32(tmp);
4206 tcg_temp_free_i32(rd);
19457615
FN
4207}
4208
4209
9ee6e8bb
PB
/* Per-op parameters for the NEON "load/store multiple structures"
 * encodings, indexed by the op field; only read by disas_neon_ls_insn(),
 * so declare it const (it was previously a writable static table).
 * nregs:      number of D registers transferred
 * interleave: element interleave factor (stride = (1 << size) * interleave)
 * spacing:    register-number step applied after each transfer
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4227
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  `env' is not referenced in this function; it is
   kept for signature consistency with the other disas_* entry points.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        /* Consecutive elements of one register are `stride' bytes apart
         * in memory when structures are interleaved.  */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Re-derive the start address for interleaved layouts rather
             * than continuing from where the previous register left off.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses assembled into one word.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        /* Writeback amount for the rm == 13 case below.  */
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy.  */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the selected lane,
                         * preserving the other lanes of the register.  */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 13 increments rn by the transfer
     * size computed above; any other rm (except 15, no writeback) adds
     * that register to rn.  */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4556
8f8e3aa4 4557/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4558static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4559{
4560 tcg_gen_and_i32(t, t, c);
f669df27 4561 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4562 tcg_gen_or_i32(dest, t, f);
4563}
4564
39d5492a 4565static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4566{
4567 switch (size) {
4568 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4569 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4570 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4571 default: abort();
4572 }
4573}
4574
39d5492a 4575static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4576{
4577 switch (size) {
02da0b2d
PM
4578 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4579 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4580 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4581 default: abort();
4582 }
4583}
4584
39d5492a 4585static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4586{
4587 switch (size) {
02da0b2d
PM
4588 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4589 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4590 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4591 default: abort();
4592 }
4593}
4594
39d5492a 4595static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4596{
4597 switch (size) {
02da0b2d
PM
4598 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4599 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4600 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4601 default: abort();
4602 }
4603}
4604
39d5492a 4605static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4606 int q, int u)
4607{
4608 if (q) {
4609 if (u) {
4610 switch (size) {
4611 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4612 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4613 default: abort();
4614 }
4615 } else {
4616 switch (size) {
4617 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4618 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4619 default: abort();
4620 }
4621 }
4622 } else {
4623 if (u) {
4624 switch (size) {
b408a9b0
CL
4625 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4626 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4627 default: abort();
4628 }
4629 } else {
4630 switch (size) {
4631 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4632 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4633 default: abort();
4634 }
4635 }
4636 }
4637}
4638
39d5492a 4639static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4640{
4641 if (u) {
4642 switch (size) {
4643 case 0: gen_helper_neon_widen_u8(dest, src); break;
4644 case 1: gen_helper_neon_widen_u16(dest, src); break;
4645 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4646 default: abort();
4647 }
4648 } else {
4649 switch (size) {
4650 case 0: gen_helper_neon_widen_s8(dest, src); break;
4651 case 1: gen_helper_neon_widen_s16(dest, src); break;
4652 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4653 default: abort();
4654 }
4655 }
7d1b0095 4656 tcg_temp_free_i32(src);
ad69471c
PB
4657}
4658
4659static inline void gen_neon_addl(int size)
4660{
4661 switch (size) {
4662 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4663 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4664 case 2: tcg_gen_add_i64(CPU_V001); break;
4665 default: abort();
4666 }
4667}
4668
4669static inline void gen_neon_subl(int size)
4670{
4671 switch (size) {
4672 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4673 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4674 case 2: tcg_gen_sub_i64(CPU_V001); break;
4675 default: abort();
4676 }
4677}
4678
a7812ae4 4679static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4680{
4681 switch (size) {
4682 case 0: gen_helper_neon_negl_u16(var, var); break;
4683 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4684 case 2:
4685 tcg_gen_neg_i64(var, var);
4686 break;
ad69471c
PB
4687 default: abort();
4688 }
4689}
4690
a7812ae4 4691static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4692{
4693 switch (size) {
02da0b2d
PM
4694 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4695 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4696 default: abort();
4697 }
4698}
4699
/* Widening multiply: dest (64-bit) = a * b (32-bit operands), with the
 * element size and signedness selected by (size << 1) | u.  The 32-bit
 * element case goes through the scalar 64 = 32 x 32 multiply helpers.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4730
39d5492a
PM
4731static void gen_neon_narrow_op(int op, int u, int size,
4732 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4733{
4734 if (op) {
4735 if (u) {
4736 gen_neon_unarrow_sats(size, dest, src);
4737 } else {
4738 gen_neon_narrow(size, dest, src);
4739 }
4740 } else {
4741 if (u) {
4742 gen_neon_narrow_satu(size, dest, src);
4743 } else {
4744 gen_neon_narrow_sats(size, dest, src);
4745 }
4746 }
4747}
4748
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry in this array has bit n set if the 3-reg-same insn with
 * that op value accepts size value n; the decoder UNDEFs when the bit
 * for the requested size is clear.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4820
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (3, 29) are unallocated encodings;
 * they UNDEF because neon_2rm_sizes[] leaves their size masks zero.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4888static int neon_2rm_is_float_op(int op)
4889{
4890 /* Return true if this neon 2reg-misc op is float-to-float */
4891 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4892 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4893 op == NEON_2RM_VRINTM ||
4894 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4895 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4896}
4897
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (Entries marked 0x4 are 32-bit-only, typically float ops.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4966
9ee6e8bb
PB
4967/* Translate a NEON data processing instruction. Return nonzero if the
4968 instruction is invalid.
ad69471c
PB
4969 We process data in a mixture of 32-bit and 64-bit chunks.
4970 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4971
0ecb72a5 4972static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4973{
4974 int op;
4975 int q;
4976 int rd, rn, rm;
4977 int size;
4978 int shift;
4979 int pass;
4980 int count;
4981 int pairwise;
4982 int u;
ca9a32e4 4983 uint32_t imm, mask;
39d5492a 4984 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4985 TCGv_i64 tmp64;
9ee6e8bb 4986
2c7ffc41
PM
4987 /* FIXME: this access check should not take precedence over UNDEF
4988 * for invalid encodings; we will generate incorrect syndrome information
4989 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4990 */
4991 if (!s->cpacr_fpen) {
4992 gen_exception_insn(s, 4, EXCP_UDEF,
4993 syn_fp_access_trap(1, 0xe, s->thumb));
4994 return 0;
4995 }
4996
5df8bac1 4997 if (!s->vfp_enabled)
9ee6e8bb
PB
4998 return 1;
4999 q = (insn & (1 << 6)) != 0;
5000 u = (insn >> 24) & 1;
5001 VFP_DREG_D(rd, insn);
5002 VFP_DREG_N(rn, insn);
5003 VFP_DREG_M(rm, insn);
5004 size = (insn >> 20) & 3;
5005 if ((insn & (1 << 23)) == 0) {
5006 /* Three register same length. */
5007 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5008 /* Catch invalid op and bad size combinations: UNDEF */
5009 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5010 return 1;
5011 }
25f84f79
PM
5012 /* All insns of this form UNDEF for either this condition or the
5013 * superset of cases "Q==1"; we catch the latter later.
5014 */
5015 if (q && ((rd | rn | rm) & 1)) {
5016 return 1;
5017 }
f1ecb913
AB
5018 /*
5019 * The SHA-1/SHA-256 3-register instructions require special treatment
5020 * here, as their size field is overloaded as an op type selector, and
5021 * they all consume their input in a single pass.
5022 */
5023 if (op == NEON_3R_SHA) {
5024 if (!q) {
5025 return 1;
5026 }
5027 if (!u) { /* SHA-1 */
5028 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
5029 return 1;
5030 }
5031 tmp = tcg_const_i32(rd);
5032 tmp2 = tcg_const_i32(rn);
5033 tmp3 = tcg_const_i32(rm);
5034 tmp4 = tcg_const_i32(size);
5035 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5036 tcg_temp_free_i32(tmp4);
5037 } else { /* SHA-256 */
5038 if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
5039 return 1;
5040 }
5041 tmp = tcg_const_i32(rd);
5042 tmp2 = tcg_const_i32(rn);
5043 tmp3 = tcg_const_i32(rm);
5044 switch (size) {
5045 case 0:
5046 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5047 break;
5048 case 1:
5049 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5050 break;
5051 case 2:
5052 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5053 break;
5054 }
5055 }
5056 tcg_temp_free_i32(tmp);
5057 tcg_temp_free_i32(tmp2);
5058 tcg_temp_free_i32(tmp3);
5059 return 0;
5060 }
62698be3
PM
5061 if (size == 3 && op != NEON_3R_LOGIC) {
5062 /* 64-bit element instructions. */
9ee6e8bb 5063 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5064 neon_load_reg64(cpu_V0, rn + pass);
5065 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5066 switch (op) {
62698be3 5067 case NEON_3R_VQADD:
9ee6e8bb 5068 if (u) {
02da0b2d
PM
5069 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5070 cpu_V0, cpu_V1);
2c0262af 5071 } else {
02da0b2d
PM
5072 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5073 cpu_V0, cpu_V1);
2c0262af 5074 }
9ee6e8bb 5075 break;
62698be3 5076 case NEON_3R_VQSUB:
9ee6e8bb 5077 if (u) {
02da0b2d
PM
5078 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5079 cpu_V0, cpu_V1);
ad69471c 5080 } else {
02da0b2d
PM
5081 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5082 cpu_V0, cpu_V1);
ad69471c
PB
5083 }
5084 break;
62698be3 5085 case NEON_3R_VSHL:
ad69471c
PB
5086 if (u) {
5087 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5088 } else {
5089 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5090 }
5091 break;
62698be3 5092 case NEON_3R_VQSHL:
ad69471c 5093 if (u) {
02da0b2d
PM
5094 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5095 cpu_V1, cpu_V0);
ad69471c 5096 } else {
02da0b2d
PM
5097 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5098 cpu_V1, cpu_V0);
ad69471c
PB
5099 }
5100 break;
62698be3 5101 case NEON_3R_VRSHL:
ad69471c
PB
5102 if (u) {
5103 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5104 } else {
ad69471c
PB
5105 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5106 }
5107 break;
62698be3 5108 case NEON_3R_VQRSHL:
ad69471c 5109 if (u) {
02da0b2d
PM
5110 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5111 cpu_V1, cpu_V0);
ad69471c 5112 } else {
02da0b2d
PM
5113 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5114 cpu_V1, cpu_V0);
1e8d4eec 5115 }
9ee6e8bb 5116 break;
62698be3 5117 case NEON_3R_VADD_VSUB:
9ee6e8bb 5118 if (u) {
ad69471c 5119 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5120 } else {
ad69471c 5121 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5122 }
5123 break;
5124 default:
5125 abort();
2c0262af 5126 }
ad69471c 5127 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5128 }
9ee6e8bb 5129 return 0;
2c0262af 5130 }
25f84f79 5131 pairwise = 0;
9ee6e8bb 5132 switch (op) {
62698be3
PM
5133 case NEON_3R_VSHL:
5134 case NEON_3R_VQSHL:
5135 case NEON_3R_VRSHL:
5136 case NEON_3R_VQRSHL:
9ee6e8bb 5137 {
ad69471c
PB
5138 int rtmp;
5139 /* Shift instruction operands are reversed. */
5140 rtmp = rn;
9ee6e8bb 5141 rn = rm;
ad69471c 5142 rm = rtmp;
9ee6e8bb 5143 }
2c0262af 5144 break;
25f84f79
PM
5145 case NEON_3R_VPADD:
5146 if (u) {
5147 return 1;
5148 }
5149 /* Fall through */
62698be3
PM
5150 case NEON_3R_VPMAX:
5151 case NEON_3R_VPMIN:
9ee6e8bb 5152 pairwise = 1;
2c0262af 5153 break;
25f84f79
PM
5154 case NEON_3R_FLOAT_ARITH:
5155 pairwise = (u && size < 2); /* if VPADD (float) */
5156 break;
5157 case NEON_3R_FLOAT_MINMAX:
5158 pairwise = u; /* if VPMIN/VPMAX (float) */
5159 break;
5160 case NEON_3R_FLOAT_CMP:
5161 if (!u && size) {
5162 /* no encoding for U=0 C=1x */
5163 return 1;
5164 }
5165 break;
5166 case NEON_3R_FLOAT_ACMP:
5167 if (!u) {
5168 return 1;
5169 }
5170 break;
505935fc
WN
5171 case NEON_3R_FLOAT_MISC:
5172 /* VMAXNM/VMINNM in ARMv8 */
5173 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5174 return 1;
5175 }
2c0262af 5176 break;
25f84f79
PM
5177 case NEON_3R_VMUL:
5178 if (u && (size != 0)) {
5179 /* UNDEF on invalid size for polynomial subcase */
5180 return 1;
5181 }
2c0262af 5182 break;
da97f52c
PM
5183 case NEON_3R_VFM:
5184 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5185 return 1;
5186 }
5187 break;
9ee6e8bb 5188 default:
2c0262af 5189 break;
9ee6e8bb 5190 }
dd8fbd78 5191
25f84f79
PM
5192 if (pairwise && q) {
5193 /* All the pairwise insns UNDEF if Q is set */
5194 return 1;
5195 }
5196
9ee6e8bb
PB
5197 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5198
5199 if (pairwise) {
5200 /* Pairwise. */
a5a14945
JR
5201 if (pass < 1) {
5202 tmp = neon_load_reg(rn, 0);
5203 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5204 } else {
a5a14945
JR
5205 tmp = neon_load_reg(rm, 0);
5206 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5207 }
5208 } else {
5209 /* Elementwise. */
dd8fbd78
FN
5210 tmp = neon_load_reg(rn, pass);
5211 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5212 }
5213 switch (op) {
62698be3 5214 case NEON_3R_VHADD:
9ee6e8bb
PB
5215 GEN_NEON_INTEGER_OP(hadd);
5216 break;
62698be3 5217 case NEON_3R_VQADD:
02da0b2d 5218 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5219 break;
62698be3 5220 case NEON_3R_VRHADD:
9ee6e8bb 5221 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5222 break;
62698be3 5223 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5224 switch ((u << 2) | size) {
5225 case 0: /* VAND */
dd8fbd78 5226 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5227 break;
5228 case 1: /* BIC */
f669df27 5229 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5230 break;
5231 case 2: /* VORR */
dd8fbd78 5232 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5233 break;
5234 case 3: /* VORN */
f669df27 5235 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5236 break;
5237 case 4: /* VEOR */
dd8fbd78 5238 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5239 break;
5240 case 5: /* VBSL */
dd8fbd78
FN
5241 tmp3 = neon_load_reg(rd, pass);
5242 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5243 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5244 break;
5245 case 6: /* VBIT */
dd8fbd78
FN
5246 tmp3 = neon_load_reg(rd, pass);
5247 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5248 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5249 break;
5250 case 7: /* VBIF */
dd8fbd78
FN
5251 tmp3 = neon_load_reg(rd, pass);
5252 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5253 tcg_temp_free_i32(tmp3);
9ee6e8bb 5254 break;
2c0262af
FB
5255 }
5256 break;
62698be3 5257 case NEON_3R_VHSUB:
9ee6e8bb
PB
5258 GEN_NEON_INTEGER_OP(hsub);
5259 break;
62698be3 5260 case NEON_3R_VQSUB:
02da0b2d 5261 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5262 break;
62698be3 5263 case NEON_3R_VCGT:
9ee6e8bb
PB
5264 GEN_NEON_INTEGER_OP(cgt);
5265 break;
62698be3 5266 case NEON_3R_VCGE:
9ee6e8bb
PB
5267 GEN_NEON_INTEGER_OP(cge);
5268 break;
62698be3 5269 case NEON_3R_VSHL:
ad69471c 5270 GEN_NEON_INTEGER_OP(shl);
2c0262af 5271 break;
62698be3 5272 case NEON_3R_VQSHL:
02da0b2d 5273 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5274 break;
62698be3 5275 case NEON_3R_VRSHL:
ad69471c 5276 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5277 break;
62698be3 5278 case NEON_3R_VQRSHL:
02da0b2d 5279 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5280 break;
62698be3 5281 case NEON_3R_VMAX:
9ee6e8bb
PB
5282 GEN_NEON_INTEGER_OP(max);
5283 break;
62698be3 5284 case NEON_3R_VMIN:
9ee6e8bb
PB
5285 GEN_NEON_INTEGER_OP(min);
5286 break;
62698be3 5287 case NEON_3R_VABD:
9ee6e8bb
PB
5288 GEN_NEON_INTEGER_OP(abd);
5289 break;
62698be3 5290 case NEON_3R_VABA:
9ee6e8bb 5291 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5292 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5293 tmp2 = neon_load_reg(rd, pass);
5294 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5295 break;
62698be3 5296 case NEON_3R_VADD_VSUB:
9ee6e8bb 5297 if (!u) { /* VADD */
62698be3 5298 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5299 } else { /* VSUB */
5300 switch (size) {
dd8fbd78
FN
5301 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5302 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5303 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5304 default: abort();
9ee6e8bb
PB
5305 }
5306 }
5307 break;
62698be3 5308 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5309 if (!u) { /* VTST */
5310 switch (size) {
dd8fbd78
FN
5311 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5312 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5313 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5314 default: abort();
9ee6e8bb
PB
5315 }
5316 } else { /* VCEQ */
5317 switch (size) {
dd8fbd78
FN
5318 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5319 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5320 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5321 default: abort();
9ee6e8bb
PB
5322 }
5323 }
5324 break;
62698be3 5325 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5326 switch (size) {
dd8fbd78
FN
5327 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5328 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5329 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5330 default: abort();
9ee6e8bb 5331 }
7d1b0095 5332 tcg_temp_free_i32(tmp2);
dd8fbd78 5333 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5334 if (u) { /* VMLS */
dd8fbd78 5335 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5336 } else { /* VMLA */
dd8fbd78 5337 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5338 }
5339 break;
62698be3 5340 case NEON_3R_VMUL:
9ee6e8bb 5341 if (u) { /* polynomial */
dd8fbd78 5342 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5343 } else { /* Integer */
5344 switch (size) {
dd8fbd78
FN
5345 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5346 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5347 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5348 default: abort();
9ee6e8bb
PB
5349 }
5350 }
5351 break;
62698be3 5352 case NEON_3R_VPMAX:
9ee6e8bb
PB
5353 GEN_NEON_INTEGER_OP(pmax);
5354 break;
62698be3 5355 case NEON_3R_VPMIN:
9ee6e8bb
PB
5356 GEN_NEON_INTEGER_OP(pmin);
5357 break;
62698be3 5358 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5359 if (!u) { /* VQDMULH */
5360 switch (size) {
02da0b2d
PM
5361 case 1:
5362 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5363 break;
5364 case 2:
5365 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5366 break;
62698be3 5367 default: abort();
9ee6e8bb 5368 }
62698be3 5369 } else { /* VQRDMULH */
9ee6e8bb 5370 switch (size) {
02da0b2d
PM
5371 case 1:
5372 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5373 break;
5374 case 2:
5375 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5376 break;
62698be3 5377 default: abort();
9ee6e8bb
PB
5378 }
5379 }
5380 break;
62698be3 5381 case NEON_3R_VPADD:
9ee6e8bb 5382 switch (size) {
dd8fbd78
FN
5383 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5384 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5385 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5386 default: abort();
9ee6e8bb
PB
5387 }
5388 break;
62698be3 5389 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5390 {
5391 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5392 switch ((u << 2) | size) {
5393 case 0: /* VADD */
aa47cfdd
PM
5394 case 4: /* VPADD */
5395 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5396 break;
5397 case 2: /* VSUB */
aa47cfdd 5398 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5399 break;
5400 case 6: /* VABD */
aa47cfdd 5401 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5402 break;
5403 default:
62698be3 5404 abort();
9ee6e8bb 5405 }
aa47cfdd 5406 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5407 break;
aa47cfdd 5408 }
62698be3 5409 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5410 {
5411 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5412 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5413 if (!u) {
7d1b0095 5414 tcg_temp_free_i32(tmp2);
dd8fbd78 5415 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5416 if (size == 0) {
aa47cfdd 5417 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5418 } else {
aa47cfdd 5419 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5420 }
5421 }
aa47cfdd 5422 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5423 break;
aa47cfdd 5424 }
62698be3 5425 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5426 {
5427 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5428 if (!u) {
aa47cfdd 5429 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5430 } else {
aa47cfdd
PM
5431 if (size == 0) {
5432 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5433 } else {
5434 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5435 }
b5ff1b31 5436 }
aa47cfdd 5437 tcg_temp_free_ptr(fpstatus);
2c0262af 5438 break;
aa47cfdd 5439 }
62698be3 5440 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5441 {
5442 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5443 if (size == 0) {
5444 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5445 } else {
5446 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5447 }
5448 tcg_temp_free_ptr(fpstatus);
2c0262af 5449 break;
aa47cfdd 5450 }
62698be3 5451 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5452 {
5453 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5454 if (size == 0) {
f71a2ae5 5455 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5456 } else {
f71a2ae5 5457 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5458 }
5459 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5460 break;
aa47cfdd 5461 }
505935fc
WN
5462 case NEON_3R_FLOAT_MISC:
5463 if (u) {
5464 /* VMAXNM/VMINNM */
5465 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5466 if (size == 0) {
f71a2ae5 5467 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5468 } else {
f71a2ae5 5469 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5470 }
5471 tcg_temp_free_ptr(fpstatus);
5472 } else {
5473 if (size == 0) {
5474 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5475 } else {
5476 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5477 }
5478 }
2c0262af 5479 break;
da97f52c
PM
5480 case NEON_3R_VFM:
5481 {
5482 /* VFMA, VFMS: fused multiply-add */
5483 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5484 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5485 if (size) {
5486 /* VFMS */
5487 gen_helper_vfp_negs(tmp, tmp);
5488 }
5489 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5490 tcg_temp_free_i32(tmp3);
5491 tcg_temp_free_ptr(fpstatus);
5492 break;
5493 }
9ee6e8bb
PB
5494 default:
5495 abort();
2c0262af 5496 }
7d1b0095 5497 tcg_temp_free_i32(tmp2);
dd8fbd78 5498
9ee6e8bb
PB
5499 /* Save the result. For elementwise operations we can put it
5500 straight into the destination register. For pairwise operations
5501 we have to be careful to avoid clobbering the source operands. */
5502 if (pairwise && rd == rm) {
dd8fbd78 5503 neon_store_scratch(pass, tmp);
9ee6e8bb 5504 } else {
dd8fbd78 5505 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5506 }
5507
5508 } /* for pass */
5509 if (pairwise && rd == rm) {
5510 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5511 tmp = neon_load_scratch(pass);
5512 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5513 }
5514 }
ad69471c 5515 /* End of 3 register same size operations. */
9ee6e8bb
PB
5516 } else if (insn & (1 << 4)) {
5517 if ((insn & 0x00380080) != 0) {
5518 /* Two registers and shift. */
5519 op = (insn >> 8) & 0xf;
5520 if (insn & (1 << 7)) {
cc13115b
PM
5521 /* 64-bit shift. */
5522 if (op > 7) {
5523 return 1;
5524 }
9ee6e8bb
PB
5525 size = 3;
5526 } else {
5527 size = 2;
5528 while ((insn & (1 << (size + 19))) == 0)
5529 size--;
5530 }
5531 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5532 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5533 by immediate using the variable shift operations. */
5534 if (op < 8) {
5535 /* Shift by immediate:
5536 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5537 if (q && ((rd | rm) & 1)) {
5538 return 1;
5539 }
5540 if (!u && (op == 4 || op == 6)) {
5541 return 1;
5542 }
9ee6e8bb
PB
5543 /* Right shifts are encoded as N - shift, where N is the
5544 element size in bits. */
5545 if (op <= 4)
5546 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5547 if (size == 3) {
5548 count = q + 1;
5549 } else {
5550 count = q ? 4: 2;
5551 }
5552 switch (size) {
5553 case 0:
5554 imm = (uint8_t) shift;
5555 imm |= imm << 8;
5556 imm |= imm << 16;
5557 break;
5558 case 1:
5559 imm = (uint16_t) shift;
5560 imm |= imm << 16;
5561 break;
5562 case 2:
5563 case 3:
5564 imm = shift;
5565 break;
5566 default:
5567 abort();
5568 }
5569
5570 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5571 if (size == 3) {
5572 neon_load_reg64(cpu_V0, rm + pass);
5573 tcg_gen_movi_i64(cpu_V1, imm);
5574 switch (op) {
5575 case 0: /* VSHR */
5576 case 1: /* VSRA */
5577 if (u)
5578 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5579 else
ad69471c 5580 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5581 break;
ad69471c
PB
5582 case 2: /* VRSHR */
5583 case 3: /* VRSRA */
5584 if (u)
5585 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5586 else
ad69471c 5587 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5588 break;
ad69471c 5589 case 4: /* VSRI */
ad69471c
PB
5590 case 5: /* VSHL, VSLI */
5591 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5592 break;
0322b26e 5593 case 6: /* VQSHLU */
02da0b2d
PM
5594 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5595 cpu_V0, cpu_V1);
ad69471c 5596 break;
0322b26e
PM
5597 case 7: /* VQSHL */
5598 if (u) {
02da0b2d 5599 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5600 cpu_V0, cpu_V1);
5601 } else {
02da0b2d 5602 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5603 cpu_V0, cpu_V1);
5604 }
9ee6e8bb 5605 break;
9ee6e8bb 5606 }
ad69471c
PB
5607 if (op == 1 || op == 3) {
5608 /* Accumulate. */
5371cb81 5609 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5610 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5611 } else if (op == 4 || (op == 5 && u)) {
5612 /* Insert */
923e6509
CL
5613 neon_load_reg64(cpu_V1, rd + pass);
5614 uint64_t mask;
5615 if (shift < -63 || shift > 63) {
5616 mask = 0;
5617 } else {
5618 if (op == 4) {
5619 mask = 0xffffffffffffffffull >> -shift;
5620 } else {
5621 mask = 0xffffffffffffffffull << shift;
5622 }
5623 }
5624 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5625 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5626 }
5627 neon_store_reg64(cpu_V0, rd + pass);
5628 } else { /* size < 3 */
5629 /* Operands in T0 and T1. */
dd8fbd78 5630 tmp = neon_load_reg(rm, pass);
7d1b0095 5631 tmp2 = tcg_temp_new_i32();
dd8fbd78 5632 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5633 switch (op) {
5634 case 0: /* VSHR */
5635 case 1: /* VSRA */
5636 GEN_NEON_INTEGER_OP(shl);
5637 break;
5638 case 2: /* VRSHR */
5639 case 3: /* VRSRA */
5640 GEN_NEON_INTEGER_OP(rshl);
5641 break;
5642 case 4: /* VSRI */
ad69471c
PB
5643 case 5: /* VSHL, VSLI */
5644 switch (size) {
dd8fbd78
FN
5645 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5646 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5647 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5648 default: abort();
ad69471c
PB
5649 }
5650 break;
0322b26e 5651 case 6: /* VQSHLU */
ad69471c 5652 switch (size) {
0322b26e 5653 case 0:
02da0b2d
PM
5654 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5655 tmp, tmp2);
0322b26e
PM
5656 break;
5657 case 1:
02da0b2d
PM
5658 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5659 tmp, tmp2);
0322b26e
PM
5660 break;
5661 case 2:
02da0b2d
PM
5662 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5663 tmp, tmp2);
0322b26e
PM
5664 break;
5665 default:
cc13115b 5666 abort();
ad69471c
PB
5667 }
5668 break;
0322b26e 5669 case 7: /* VQSHL */
02da0b2d 5670 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5671 break;
ad69471c 5672 }
7d1b0095 5673 tcg_temp_free_i32(tmp2);
ad69471c
PB
5674
5675 if (op == 1 || op == 3) {
5676 /* Accumulate. */
dd8fbd78 5677 tmp2 = neon_load_reg(rd, pass);
5371cb81 5678 gen_neon_add(size, tmp, tmp2);
7d1b0095 5679 tcg_temp_free_i32(tmp2);
ad69471c
PB
5680 } else if (op == 4 || (op == 5 && u)) {
5681 /* Insert */
5682 switch (size) {
5683 case 0:
5684 if (op == 4)
ca9a32e4 5685 mask = 0xff >> -shift;
ad69471c 5686 else
ca9a32e4
JR
5687 mask = (uint8_t)(0xff << shift);
5688 mask |= mask << 8;
5689 mask |= mask << 16;
ad69471c
PB
5690 break;
5691 case 1:
5692 if (op == 4)
ca9a32e4 5693 mask = 0xffff >> -shift;
ad69471c 5694 else
ca9a32e4
JR
5695 mask = (uint16_t)(0xffff << shift);
5696 mask |= mask << 16;
ad69471c
PB
5697 break;
5698 case 2:
ca9a32e4
JR
5699 if (shift < -31 || shift > 31) {
5700 mask = 0;
5701 } else {
5702 if (op == 4)
5703 mask = 0xffffffffu >> -shift;
5704 else
5705 mask = 0xffffffffu << shift;
5706 }
ad69471c
PB
5707 break;
5708 default:
5709 abort();
5710 }
dd8fbd78 5711 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5712 tcg_gen_andi_i32(tmp, tmp, mask);
5713 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5714 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5715 tcg_temp_free_i32(tmp2);
ad69471c 5716 }
dd8fbd78 5717 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5718 }
5719 } /* for pass */
5720 } else if (op < 10) {
ad69471c 5721 /* Shift by immediate and narrow:
9ee6e8bb 5722 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5723 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5724 if (rm & 1) {
5725 return 1;
5726 }
9ee6e8bb
PB
5727 shift = shift - (1 << (size + 3));
5728 size++;
92cdfaeb 5729 if (size == 3) {
a7812ae4 5730 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5731 neon_load_reg64(cpu_V0, rm);
5732 neon_load_reg64(cpu_V1, rm + 1);
5733 for (pass = 0; pass < 2; pass++) {
5734 TCGv_i64 in;
5735 if (pass == 0) {
5736 in = cpu_V0;
5737 } else {
5738 in = cpu_V1;
5739 }
ad69471c 5740 if (q) {
0b36f4cd 5741 if (input_unsigned) {
92cdfaeb 5742 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5743 } else {
92cdfaeb 5744 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5745 }
ad69471c 5746 } else {
0b36f4cd 5747 if (input_unsigned) {
92cdfaeb 5748 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5749 } else {
92cdfaeb 5750 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5751 }
ad69471c 5752 }
7d1b0095 5753 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5754 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5755 neon_store_reg(rd, pass, tmp);
5756 } /* for pass */
5757 tcg_temp_free_i64(tmp64);
5758 } else {
5759 if (size == 1) {
5760 imm = (uint16_t)shift;
5761 imm |= imm << 16;
2c0262af 5762 } else {
92cdfaeb
PM
5763 /* size == 2 */
5764 imm = (uint32_t)shift;
5765 }
5766 tmp2 = tcg_const_i32(imm);
5767 tmp4 = neon_load_reg(rm + 1, 0);
5768 tmp5 = neon_load_reg(rm + 1, 1);
5769 for (pass = 0; pass < 2; pass++) {
5770 if (pass == 0) {
5771 tmp = neon_load_reg(rm, 0);
5772 } else {
5773 tmp = tmp4;
5774 }
0b36f4cd
CL
5775 gen_neon_shift_narrow(size, tmp, tmp2, q,
5776 input_unsigned);
92cdfaeb
PM
5777 if (pass == 0) {
5778 tmp3 = neon_load_reg(rm, 1);
5779 } else {
5780 tmp3 = tmp5;
5781 }
0b36f4cd
CL
5782 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5783 input_unsigned);
36aa55dc 5784 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5785 tcg_temp_free_i32(tmp);
5786 tcg_temp_free_i32(tmp3);
5787 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5788 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5789 neon_store_reg(rd, pass, tmp);
5790 } /* for pass */
c6067f04 5791 tcg_temp_free_i32(tmp2);
b75263d6 5792 }
9ee6e8bb 5793 } else if (op == 10) {
cc13115b
PM
5794 /* VSHLL, VMOVL */
5795 if (q || (rd & 1)) {
9ee6e8bb 5796 return 1;
cc13115b 5797 }
ad69471c
PB
5798 tmp = neon_load_reg(rm, 0);
5799 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5800 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5801 if (pass == 1)
5802 tmp = tmp2;
5803
5804 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5805
9ee6e8bb
PB
5806 if (shift != 0) {
5807 /* The shift is less than the width of the source
ad69471c
PB
5808 type, so we can just shift the whole register. */
5809 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5810 /* Widen the result of shift: we need to clear
5811 * the potential overflow bits resulting from
5812 * left bits of the narrow input appearing as
5813 * right bits of left the neighbour narrow
5814 * input. */
ad69471c
PB
5815 if (size < 2 || !u) {
5816 uint64_t imm64;
5817 if (size == 0) {
5818 imm = (0xffu >> (8 - shift));
5819 imm |= imm << 16;
acdf01ef 5820 } else if (size == 1) {
ad69471c 5821 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5822 } else {
5823 /* size == 2 */
5824 imm = 0xffffffff >> (32 - shift);
5825 }
5826 if (size < 2) {
5827 imm64 = imm | (((uint64_t)imm) << 32);
5828 } else {
5829 imm64 = imm;
9ee6e8bb 5830 }
acdf01ef 5831 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5832 }
5833 }
ad69471c 5834 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5835 }
f73534a5 5836 } else if (op >= 14) {
9ee6e8bb 5837 /* VCVT fixed-point. */
cc13115b
PM
5838 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5839 return 1;
5840 }
f73534a5
PM
5841 /* We have already masked out the must-be-1 top bit of imm6,
5842 * hence this 32-shift where the ARM ARM has 64-imm6.
5843 */
5844 shift = 32 - shift;
9ee6e8bb 5845 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5846 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5847 if (!(op & 1)) {
9ee6e8bb 5848 if (u)
5500b06c 5849 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5850 else
5500b06c 5851 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5852 } else {
5853 if (u)
5500b06c 5854 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5855 else
5500b06c 5856 gen_vfp_tosl(0, shift, 1);
2c0262af 5857 }
4373f3ce 5858 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5859 }
5860 } else {
9ee6e8bb
PB
5861 return 1;
5862 }
5863 } else { /* (insn & 0x00380080) == 0 */
5864 int invert;
7d80fee5
PM
5865 if (q && (rd & 1)) {
5866 return 1;
5867 }
9ee6e8bb
PB
5868
5869 op = (insn >> 8) & 0xf;
5870 /* One register and immediate. */
5871 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5872 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5873 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5874 * We choose to not special-case this and will behave as if a
5875 * valid constant encoding of 0 had been given.
5876 */
9ee6e8bb
PB
5877 switch (op) {
5878 case 0: case 1:
5879 /* no-op */
5880 break;
5881 case 2: case 3:
5882 imm <<= 8;
5883 break;
5884 case 4: case 5:
5885 imm <<= 16;
5886 break;
5887 case 6: case 7:
5888 imm <<= 24;
5889 break;
5890 case 8: case 9:
5891 imm |= imm << 16;
5892 break;
5893 case 10: case 11:
5894 imm = (imm << 8) | (imm << 24);
5895 break;
5896 case 12:
8e31209e 5897 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5898 break;
5899 case 13:
5900 imm = (imm << 16) | 0xffff;
5901 break;
5902 case 14:
5903 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5904 if (invert)
5905 imm = ~imm;
5906 break;
5907 case 15:
7d80fee5
PM
5908 if (invert) {
5909 return 1;
5910 }
9ee6e8bb
PB
5911 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5912 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5913 break;
5914 }
5915 if (invert)
5916 imm = ~imm;
5917
9ee6e8bb
PB
5918 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5919 if (op & 1 && op < 12) {
ad69471c 5920 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5921 if (invert) {
5922 /* The immediate value has already been inverted, so
5923 BIC becomes AND. */
ad69471c 5924 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5925 } else {
ad69471c 5926 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5927 }
9ee6e8bb 5928 } else {
ad69471c 5929 /* VMOV, VMVN. */
7d1b0095 5930 tmp = tcg_temp_new_i32();
9ee6e8bb 5931 if (op == 14 && invert) {
a5a14945 5932 int n;
ad69471c
PB
5933 uint32_t val;
5934 val = 0;
9ee6e8bb
PB
5935 for (n = 0; n < 4; n++) {
5936 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5937 val |= 0xff << (n * 8);
9ee6e8bb 5938 }
ad69471c
PB
5939 tcg_gen_movi_i32(tmp, val);
5940 } else {
5941 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5942 }
9ee6e8bb 5943 }
ad69471c 5944 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5945 }
5946 }
e4b3861d 5947 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5948 if (size != 3) {
5949 op = (insn >> 8) & 0xf;
5950 if ((insn & (1 << 6)) == 0) {
5951 /* Three registers of different lengths. */
5952 int src1_wide;
5953 int src2_wide;
5954 int prewiden;
526d0096
PM
5955 /* undefreq: bit 0 : UNDEF if size == 0
5956 * bit 1 : UNDEF if size == 1
5957 * bit 2 : UNDEF if size == 2
5958 * bit 3 : UNDEF if U == 1
5959 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
5960 */
5961 int undefreq;
5962 /* prewiden, src1_wide, src2_wide, undefreq */
5963 static const int neon_3reg_wide[16][4] = {
5964 {1, 0, 0, 0}, /* VADDL */
5965 {1, 1, 0, 0}, /* VADDW */
5966 {1, 0, 0, 0}, /* VSUBL */
5967 {1, 1, 0, 0}, /* VSUBW */
5968 {0, 1, 1, 0}, /* VADDHN */
5969 {0, 0, 0, 0}, /* VABAL */
5970 {0, 1, 1, 0}, /* VSUBHN */
5971 {0, 0, 0, 0}, /* VABDL */
5972 {0, 0, 0, 0}, /* VMLAL */
526d0096 5973 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 5974 {0, 0, 0, 0}, /* VMLSL */
526d0096 5975 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 5976 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 5977 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 5978 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 5979 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5980 };
5981
5982 prewiden = neon_3reg_wide[op][0];
5983 src1_wide = neon_3reg_wide[op][1];
5984 src2_wide = neon_3reg_wide[op][2];
695272dc 5985 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5986
526d0096
PM
5987 if ((undefreq & (1 << size)) ||
5988 ((undefreq & 8) && u)) {
695272dc
PM
5989 return 1;
5990 }
5991 if ((src1_wide && (rn & 1)) ||
5992 (src2_wide && (rm & 1)) ||
5993 (!src2_wide && (rd & 1))) {
ad69471c 5994 return 1;
695272dc 5995 }
ad69471c 5996
4e624eda
PM
5997 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
5998 * outside the loop below as it only performs a single pass.
5999 */
6000 if (op == 14 && size == 2) {
6001 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6002
6003 if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
6004 return 1;
6005 }
6006 tcg_rn = tcg_temp_new_i64();
6007 tcg_rm = tcg_temp_new_i64();
6008 tcg_rd = tcg_temp_new_i64();
6009 neon_load_reg64(tcg_rn, rn);
6010 neon_load_reg64(tcg_rm, rm);
6011 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6012 neon_store_reg64(tcg_rd, rd);
6013 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6014 neon_store_reg64(tcg_rd, rd + 1);
6015 tcg_temp_free_i64(tcg_rn);
6016 tcg_temp_free_i64(tcg_rm);
6017 tcg_temp_free_i64(tcg_rd);
6018 return 0;
6019 }
6020
9ee6e8bb
PB
6021 /* Avoid overlapping operands. Wide source operands are
6022 always aligned so will never overlap with wide
6023 destinations in problematic ways. */
8f8e3aa4 6024 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6025 tmp = neon_load_reg(rm, 1);
6026 neon_store_scratch(2, tmp);
8f8e3aa4 6027 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6028 tmp = neon_load_reg(rn, 1);
6029 neon_store_scratch(2, tmp);
9ee6e8bb 6030 }
39d5492a 6031 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6032 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6033 if (src1_wide) {
6034 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6035 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6036 } else {
ad69471c 6037 if (pass == 1 && rd == rn) {
dd8fbd78 6038 tmp = neon_load_scratch(2);
9ee6e8bb 6039 } else {
ad69471c
PB
6040 tmp = neon_load_reg(rn, pass);
6041 }
6042 if (prewiden) {
6043 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6044 }
6045 }
ad69471c
PB
6046 if (src2_wide) {
6047 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6048 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6049 } else {
ad69471c 6050 if (pass == 1 && rd == rm) {
dd8fbd78 6051 tmp2 = neon_load_scratch(2);
9ee6e8bb 6052 } else {
ad69471c
PB
6053 tmp2 = neon_load_reg(rm, pass);
6054 }
6055 if (prewiden) {
6056 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6057 }
9ee6e8bb
PB
6058 }
6059 switch (op) {
6060 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6061 gen_neon_addl(size);
9ee6e8bb 6062 break;
79b0e534 6063 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6064 gen_neon_subl(size);
9ee6e8bb
PB
6065 break;
6066 case 5: case 7: /* VABAL, VABDL */
6067 switch ((size << 1) | u) {
ad69471c
PB
6068 case 0:
6069 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6070 break;
6071 case 1:
6072 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6073 break;
6074 case 2:
6075 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6076 break;
6077 case 3:
6078 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6079 break;
6080 case 4:
6081 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6082 break;
6083 case 5:
6084 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6085 break;
9ee6e8bb
PB
6086 default: abort();
6087 }
7d1b0095
PM
6088 tcg_temp_free_i32(tmp2);
6089 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6090 break;
6091 case 8: case 9: case 10: case 11: case 12: case 13:
6092 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6093 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6094 break;
6095 case 14: /* Polynomial VMULL */
e5ca24cb 6096 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6097 tcg_temp_free_i32(tmp2);
6098 tcg_temp_free_i32(tmp);
e5ca24cb 6099 break;
695272dc
PM
6100 default: /* 15 is RESERVED: caught earlier */
6101 abort();
9ee6e8bb 6102 }
ebcd88ce
PM
6103 if (op == 13) {
6104 /* VQDMULL */
6105 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6106 neon_store_reg64(cpu_V0, rd + pass);
6107 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6108 /* Accumulate. */
ebcd88ce 6109 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6110 switch (op) {
4dc064e6
PM
6111 case 10: /* VMLSL */
6112 gen_neon_negl(cpu_V0, size);
6113 /* Fall through */
6114 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6115 gen_neon_addl(size);
9ee6e8bb
PB
6116 break;
6117 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6118 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6119 if (op == 11) {
6120 gen_neon_negl(cpu_V0, size);
6121 }
ad69471c
PB
6122 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6123 break;
9ee6e8bb
PB
6124 default:
6125 abort();
6126 }
ad69471c 6127 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6128 } else if (op == 4 || op == 6) {
6129 /* Narrowing operation. */
7d1b0095 6130 tmp = tcg_temp_new_i32();
79b0e534 6131 if (!u) {
9ee6e8bb 6132 switch (size) {
ad69471c
PB
6133 case 0:
6134 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6135 break;
6136 case 1:
6137 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6138 break;
6139 case 2:
6140 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6141 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6142 break;
9ee6e8bb
PB
6143 default: abort();
6144 }
6145 } else {
6146 switch (size) {
ad69471c
PB
6147 case 0:
6148 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6149 break;
6150 case 1:
6151 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6152 break;
6153 case 2:
6154 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6155 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6156 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6157 break;
9ee6e8bb
PB
6158 default: abort();
6159 }
6160 }
ad69471c
PB
6161 if (pass == 0) {
6162 tmp3 = tmp;
6163 } else {
6164 neon_store_reg(rd, 0, tmp3);
6165 neon_store_reg(rd, 1, tmp);
6166 }
9ee6e8bb
PB
6167 } else {
6168 /* Write back the result. */
ad69471c 6169 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6170 }
6171 }
6172 } else {
3e3326df
PM
6173 /* Two registers and a scalar. NB that for ops of this form
6174 * the ARM ARM labels bit 24 as Q, but it is in our variable
6175 * 'u', not 'q'.
6176 */
6177 if (size == 0) {
6178 return 1;
6179 }
9ee6e8bb 6180 switch (op) {
9ee6e8bb 6181 case 1: /* Float VMLA scalar */
9ee6e8bb 6182 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6183 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6184 if (size == 1) {
6185 return 1;
6186 }
6187 /* fall through */
6188 case 0: /* Integer VMLA scalar */
6189 case 4: /* Integer VMLS scalar */
6190 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6191 case 12: /* VQDMULH scalar */
6192 case 13: /* VQRDMULH scalar */
3e3326df
PM
6193 if (u && ((rd | rn) & 1)) {
6194 return 1;
6195 }
dd8fbd78
FN
6196 tmp = neon_get_scalar(size, rm);
6197 neon_store_scratch(0, tmp);
9ee6e8bb 6198 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6199 tmp = neon_load_scratch(0);
6200 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6201 if (op == 12) {
6202 if (size == 1) {
02da0b2d 6203 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6204 } else {
02da0b2d 6205 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6206 }
6207 } else if (op == 13) {
6208 if (size == 1) {
02da0b2d 6209 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6210 } else {
02da0b2d 6211 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6212 }
6213 } else if (op & 1) {
aa47cfdd
PM
6214 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6215 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6216 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6217 } else {
6218 switch (size) {
dd8fbd78
FN
6219 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6220 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6221 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6222 default: abort();
9ee6e8bb
PB
6223 }
6224 }
7d1b0095 6225 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6226 if (op < 8) {
6227 /* Accumulate. */
dd8fbd78 6228 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6229 switch (op) {
6230 case 0:
dd8fbd78 6231 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6232 break;
6233 case 1:
aa47cfdd
PM
6234 {
6235 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6236 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6237 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6238 break;
aa47cfdd 6239 }
9ee6e8bb 6240 case 4:
dd8fbd78 6241 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6242 break;
6243 case 5:
aa47cfdd
PM
6244 {
6245 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6246 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6247 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6248 break;
aa47cfdd 6249 }
9ee6e8bb
PB
6250 default:
6251 abort();
6252 }
7d1b0095 6253 tcg_temp_free_i32(tmp2);
9ee6e8bb 6254 }
dd8fbd78 6255 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6256 }
6257 break;
9ee6e8bb 6258 case 3: /* VQDMLAL scalar */
9ee6e8bb 6259 case 7: /* VQDMLSL scalar */
9ee6e8bb 6260 case 11: /* VQDMULL scalar */
3e3326df 6261 if (u == 1) {
ad69471c 6262 return 1;
3e3326df
PM
6263 }
6264 /* fall through */
6265 case 2: /* VMLAL sclar */
6266 case 6: /* VMLSL scalar */
6267 case 10: /* VMULL scalar */
6268 if (rd & 1) {
6269 return 1;
6270 }
dd8fbd78 6271 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6272 /* We need a copy of tmp2 because gen_neon_mull
6273 * deletes it during pass 0. */
7d1b0095 6274 tmp4 = tcg_temp_new_i32();
c6067f04 6275 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6276 tmp3 = neon_load_reg(rn, 1);
ad69471c 6277
9ee6e8bb 6278 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6279 if (pass == 0) {
6280 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6281 } else {
dd8fbd78 6282 tmp = tmp3;
c6067f04 6283 tmp2 = tmp4;
9ee6e8bb 6284 }
ad69471c 6285 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6286 if (op != 11) {
6287 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6288 }
9ee6e8bb 6289 switch (op) {
4dc064e6
PM
6290 case 6:
6291 gen_neon_negl(cpu_V0, size);
6292 /* Fall through */
6293 case 2:
ad69471c 6294 gen_neon_addl(size);
9ee6e8bb
PB
6295 break;
6296 case 3: case 7:
ad69471c 6297 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6298 if (op == 7) {
6299 gen_neon_negl(cpu_V0, size);
6300 }
ad69471c 6301 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6302 break;
6303 case 10:
6304 /* no-op */
6305 break;
6306 case 11:
ad69471c 6307 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6308 break;
6309 default:
6310 abort();
6311 }
ad69471c 6312 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6313 }
dd8fbd78 6314
dd8fbd78 6315
9ee6e8bb
PB
6316 break;
6317 default: /* 14 and 15 are RESERVED */
6318 return 1;
6319 }
6320 }
6321 } else { /* size == 3 */
6322 if (!u) {
6323 /* Extract. */
9ee6e8bb 6324 imm = (insn >> 8) & 0xf;
ad69471c
PB
6325
6326 if (imm > 7 && !q)
6327 return 1;
6328
52579ea1
PM
6329 if (q && ((rd | rn | rm) & 1)) {
6330 return 1;
6331 }
6332
ad69471c
PB
6333 if (imm == 0) {
6334 neon_load_reg64(cpu_V0, rn);
6335 if (q) {
6336 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6337 }
ad69471c
PB
6338 } else if (imm == 8) {
6339 neon_load_reg64(cpu_V0, rn + 1);
6340 if (q) {
6341 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6342 }
ad69471c 6343 } else if (q) {
a7812ae4 6344 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6345 if (imm < 8) {
6346 neon_load_reg64(cpu_V0, rn);
a7812ae4 6347 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6348 } else {
6349 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6350 neon_load_reg64(tmp64, rm);
ad69471c
PB
6351 }
6352 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6353 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6354 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6355 if (imm < 8) {
6356 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6357 } else {
ad69471c
PB
6358 neon_load_reg64(cpu_V1, rm + 1);
6359 imm -= 8;
9ee6e8bb 6360 }
ad69471c 6361 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6362 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6363 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6364 tcg_temp_free_i64(tmp64);
ad69471c 6365 } else {
a7812ae4 6366 /* BUGFIX */
ad69471c 6367 neon_load_reg64(cpu_V0, rn);
a7812ae4 6368 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6369 neon_load_reg64(cpu_V1, rm);
a7812ae4 6370 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6371 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6372 }
6373 neon_store_reg64(cpu_V0, rd);
6374 if (q) {
6375 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6376 }
6377 } else if ((insn & (1 << 11)) == 0) {
6378 /* Two register misc. */
6379 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6380 size = (insn >> 18) & 3;
600b828c
PM
6381 /* UNDEF for unknown op values and bad op-size combinations */
6382 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6383 return 1;
6384 }
fc2a9b37
PM
6385 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6386 q && ((rm | rd) & 1)) {
6387 return 1;
6388 }
9ee6e8bb 6389 switch (op) {
600b828c 6390 case NEON_2RM_VREV64:
9ee6e8bb 6391 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6392 tmp = neon_load_reg(rm, pass * 2);
6393 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6394 switch (size) {
dd8fbd78
FN
6395 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6396 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6397 case 2: /* no-op */ break;
6398 default: abort();
6399 }
dd8fbd78 6400 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6401 if (size == 2) {
dd8fbd78 6402 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6403 } else {
9ee6e8bb 6404 switch (size) {
dd8fbd78
FN
6405 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6406 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6407 default: abort();
6408 }
dd8fbd78 6409 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6410 }
6411 }
6412 break;
600b828c
PM
6413 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6414 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6415 for (pass = 0; pass < q + 1; pass++) {
6416 tmp = neon_load_reg(rm, pass * 2);
6417 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6418 tmp = neon_load_reg(rm, pass * 2 + 1);
6419 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6420 switch (size) {
6421 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6422 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6423 case 2: tcg_gen_add_i64(CPU_V001); break;
6424 default: abort();
6425 }
600b828c 6426 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6427 /* Accumulate. */
ad69471c
PB
6428 neon_load_reg64(cpu_V1, rd + pass);
6429 gen_neon_addl(size);
9ee6e8bb 6430 }
ad69471c 6431 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6432 }
6433 break;
600b828c 6434 case NEON_2RM_VTRN:
9ee6e8bb 6435 if (size == 2) {
a5a14945 6436 int n;
9ee6e8bb 6437 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6438 tmp = neon_load_reg(rm, n);
6439 tmp2 = neon_load_reg(rd, n + 1);
6440 neon_store_reg(rm, n, tmp2);
6441 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6442 }
6443 } else {
6444 goto elementwise;
6445 }
6446 break;
600b828c 6447 case NEON_2RM_VUZP:
02acedf9 6448 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6449 return 1;
9ee6e8bb
PB
6450 }
6451 break;
600b828c 6452 case NEON_2RM_VZIP:
d68a6f3a 6453 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6454 return 1;
9ee6e8bb
PB
6455 }
6456 break;
600b828c
PM
6457 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6458 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6459 if (rm & 1) {
6460 return 1;
6461 }
39d5492a 6462 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6463 for (pass = 0; pass < 2; pass++) {
ad69471c 6464 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6465 tmp = tcg_temp_new_i32();
600b828c
PM
6466 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6467 tmp, cpu_V0);
ad69471c
PB
6468 if (pass == 0) {
6469 tmp2 = tmp;
6470 } else {
6471 neon_store_reg(rd, 0, tmp2);
6472 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6473 }
9ee6e8bb
PB
6474 }
6475 break;
600b828c 6476 case NEON_2RM_VSHLL:
fc2a9b37 6477 if (q || (rd & 1)) {
9ee6e8bb 6478 return 1;
600b828c 6479 }
ad69471c
PB
6480 tmp = neon_load_reg(rm, 0);
6481 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6482 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6483 if (pass == 1)
6484 tmp = tmp2;
6485 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6486 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6487 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6488 }
6489 break;
600b828c 6490 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6491 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6492 q || (rm & 1)) {
6493 return 1;
6494 }
7d1b0095
PM
6495 tmp = tcg_temp_new_i32();
6496 tmp2 = tcg_temp_new_i32();
60011498 6497 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6498 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6499 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6500 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6501 tcg_gen_shli_i32(tmp2, tmp2, 16);
6502 tcg_gen_or_i32(tmp2, tmp2, tmp);
6503 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6504 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6505 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6506 neon_store_reg(rd, 0, tmp2);
7d1b0095 6507 tmp2 = tcg_temp_new_i32();
2d981da7 6508 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6509 tcg_gen_shli_i32(tmp2, tmp2, 16);
6510 tcg_gen_or_i32(tmp2, tmp2, tmp);
6511 neon_store_reg(rd, 1, tmp2);
7d1b0095 6512 tcg_temp_free_i32(tmp);
60011498 6513 break;
600b828c 6514 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6515 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6516 q || (rd & 1)) {
6517 return 1;
6518 }
7d1b0095 6519 tmp3 = tcg_temp_new_i32();
60011498
PB
6520 tmp = neon_load_reg(rm, 0);
6521 tmp2 = neon_load_reg(rm, 1);
6522 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6523 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6524 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6525 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6526 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6527 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6528 tcg_temp_free_i32(tmp);
60011498 6529 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6530 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6531 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6532 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6533 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6534 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6535 tcg_temp_free_i32(tmp2);
6536 tcg_temp_free_i32(tmp3);
60011498 6537 break;
9d935509
AB
6538 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6539 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6540 || ((rm | rd) & 1)) {
6541 return 1;
6542 }
6543 tmp = tcg_const_i32(rd);
6544 tmp2 = tcg_const_i32(rm);
6545
6546 /* Bit 6 is the lowest opcode bit; it distinguishes between
6547 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6548 */
6549 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6550
6551 if (op == NEON_2RM_AESE) {
6552 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6553 } else {
6554 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6555 }
6556 tcg_temp_free_i32(tmp);
6557 tcg_temp_free_i32(tmp2);
6558 tcg_temp_free_i32(tmp3);
6559 break;
f1ecb913
AB
6560 case NEON_2RM_SHA1H:
6561 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
6562 || ((rm | rd) & 1)) {
6563 return 1;
6564 }
6565 tmp = tcg_const_i32(rd);
6566 tmp2 = tcg_const_i32(rm);
6567
6568 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6569
6570 tcg_temp_free_i32(tmp);
6571 tcg_temp_free_i32(tmp2);
6572 break;
6573 case NEON_2RM_SHA1SU1:
6574 if ((rm | rd) & 1) {
6575 return 1;
6576 }
6577 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6578 if (q) {
6579 if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
6580 return 1;
6581 }
6582 } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
6583 return 1;
6584 }
6585 tmp = tcg_const_i32(rd);
6586 tmp2 = tcg_const_i32(rm);
6587 if (q) {
6588 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6589 } else {
6590 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6591 }
6592 tcg_temp_free_i32(tmp);
6593 tcg_temp_free_i32(tmp2);
6594 break;
9ee6e8bb
PB
6595 default:
6596 elementwise:
6597 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6598 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6599 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6600 neon_reg_offset(rm, pass));
39d5492a 6601 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6602 } else {
dd8fbd78 6603 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6604 }
6605 switch (op) {
600b828c 6606 case NEON_2RM_VREV32:
9ee6e8bb 6607 switch (size) {
dd8fbd78
FN
6608 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6609 case 1: gen_swap_half(tmp); break;
600b828c 6610 default: abort();
9ee6e8bb
PB
6611 }
6612 break;
600b828c 6613 case NEON_2RM_VREV16:
dd8fbd78 6614 gen_rev16(tmp);
9ee6e8bb 6615 break;
600b828c 6616 case NEON_2RM_VCLS:
9ee6e8bb 6617 switch (size) {
dd8fbd78
FN
6618 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6619 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6620 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6621 default: abort();
9ee6e8bb
PB
6622 }
6623 break;
600b828c 6624 case NEON_2RM_VCLZ:
9ee6e8bb 6625 switch (size) {
dd8fbd78
FN
6626 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6627 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6628 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6629 default: abort();
9ee6e8bb
PB
6630 }
6631 break;
600b828c 6632 case NEON_2RM_VCNT:
dd8fbd78 6633 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6634 break;
600b828c 6635 case NEON_2RM_VMVN:
dd8fbd78 6636 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6637 break;
600b828c 6638 case NEON_2RM_VQABS:
9ee6e8bb 6639 switch (size) {
02da0b2d
PM
6640 case 0:
6641 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6642 break;
6643 case 1:
6644 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6645 break;
6646 case 2:
6647 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6648 break;
600b828c 6649 default: abort();
9ee6e8bb
PB
6650 }
6651 break;
600b828c 6652 case NEON_2RM_VQNEG:
9ee6e8bb 6653 switch (size) {
02da0b2d
PM
6654 case 0:
6655 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6656 break;
6657 case 1:
6658 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6659 break;
6660 case 2:
6661 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6662 break;
600b828c 6663 default: abort();
9ee6e8bb
PB
6664 }
6665 break;
600b828c 6666 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6667 tmp2 = tcg_const_i32(0);
9ee6e8bb 6668 switch(size) {
dd8fbd78
FN
6669 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6670 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6671 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6672 default: abort();
9ee6e8bb 6673 }
39d5492a 6674 tcg_temp_free_i32(tmp2);
600b828c 6675 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6676 tcg_gen_not_i32(tmp, tmp);
600b828c 6677 }
9ee6e8bb 6678 break;
600b828c 6679 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6680 tmp2 = tcg_const_i32(0);
9ee6e8bb 6681 switch(size) {
dd8fbd78
FN
6682 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6683 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6684 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6685 default: abort();
9ee6e8bb 6686 }
39d5492a 6687 tcg_temp_free_i32(tmp2);
600b828c 6688 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6689 tcg_gen_not_i32(tmp, tmp);
600b828c 6690 }
9ee6e8bb 6691 break;
600b828c 6692 case NEON_2RM_VCEQ0:
dd8fbd78 6693 tmp2 = tcg_const_i32(0);
9ee6e8bb 6694 switch(size) {
dd8fbd78
FN
6695 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6696 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6697 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6698 default: abort();
9ee6e8bb 6699 }
39d5492a 6700 tcg_temp_free_i32(tmp2);
9ee6e8bb 6701 break;
600b828c 6702 case NEON_2RM_VABS:
9ee6e8bb 6703 switch(size) {
dd8fbd78
FN
6704 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6705 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6706 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6707 default: abort();
9ee6e8bb
PB
6708 }
6709 break;
600b828c 6710 case NEON_2RM_VNEG:
dd8fbd78
FN
6711 tmp2 = tcg_const_i32(0);
6712 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6713 tcg_temp_free_i32(tmp2);
9ee6e8bb 6714 break;
600b828c 6715 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6716 {
6717 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6718 tmp2 = tcg_const_i32(0);
aa47cfdd 6719 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6720 tcg_temp_free_i32(tmp2);
aa47cfdd 6721 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6722 break;
aa47cfdd 6723 }
600b828c 6724 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6725 {
6726 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6727 tmp2 = tcg_const_i32(0);
aa47cfdd 6728 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6729 tcg_temp_free_i32(tmp2);
aa47cfdd 6730 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6731 break;
aa47cfdd 6732 }
600b828c 6733 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6734 {
6735 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6736 tmp2 = tcg_const_i32(0);
aa47cfdd 6737 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6738 tcg_temp_free_i32(tmp2);
aa47cfdd 6739 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6740 break;
aa47cfdd 6741 }
600b828c 6742 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6743 {
6744 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6745 tmp2 = tcg_const_i32(0);
aa47cfdd 6746 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6747 tcg_temp_free_i32(tmp2);
aa47cfdd 6748 tcg_temp_free_ptr(fpstatus);
0e326109 6749 break;
aa47cfdd 6750 }
600b828c 6751 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6752 {
6753 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6754 tmp2 = tcg_const_i32(0);
aa47cfdd 6755 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6756 tcg_temp_free_i32(tmp2);
aa47cfdd 6757 tcg_temp_free_ptr(fpstatus);
0e326109 6758 break;
aa47cfdd 6759 }
600b828c 6760 case NEON_2RM_VABS_F:
4373f3ce 6761 gen_vfp_abs(0);
9ee6e8bb 6762 break;
600b828c 6763 case NEON_2RM_VNEG_F:
4373f3ce 6764 gen_vfp_neg(0);
9ee6e8bb 6765 break;
600b828c 6766 case NEON_2RM_VSWP:
dd8fbd78
FN
6767 tmp2 = neon_load_reg(rd, pass);
6768 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6769 break;
600b828c 6770 case NEON_2RM_VTRN:
dd8fbd78 6771 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6772 switch (size) {
dd8fbd78
FN
6773 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6774 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6775 default: abort();
9ee6e8bb 6776 }
dd8fbd78 6777 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6778 break;
34f7b0a2
WN
6779 case NEON_2RM_VRINTN:
6780 case NEON_2RM_VRINTA:
6781 case NEON_2RM_VRINTM:
6782 case NEON_2RM_VRINTP:
6783 case NEON_2RM_VRINTZ:
6784 {
6785 TCGv_i32 tcg_rmode;
6786 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6787 int rmode;
6788
6789 if (op == NEON_2RM_VRINTZ) {
6790 rmode = FPROUNDING_ZERO;
6791 } else {
6792 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6793 }
6794
6795 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6796 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6797 cpu_env);
6798 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6799 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6800 cpu_env);
6801 tcg_temp_free_ptr(fpstatus);
6802 tcg_temp_free_i32(tcg_rmode);
6803 break;
6804 }
2ce70625
WN
6805 case NEON_2RM_VRINTX:
6806 {
6807 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6808 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6809 tcg_temp_free_ptr(fpstatus);
6810 break;
6811 }
901ad525
WN
6812 case NEON_2RM_VCVTAU:
6813 case NEON_2RM_VCVTAS:
6814 case NEON_2RM_VCVTNU:
6815 case NEON_2RM_VCVTNS:
6816 case NEON_2RM_VCVTPU:
6817 case NEON_2RM_VCVTPS:
6818 case NEON_2RM_VCVTMU:
6819 case NEON_2RM_VCVTMS:
6820 {
6821 bool is_signed = !extract32(insn, 7, 1);
6822 TCGv_ptr fpst = get_fpstatus_ptr(1);
6823 TCGv_i32 tcg_rmode, tcg_shift;
6824 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6825
6826 tcg_shift = tcg_const_i32(0);
6827 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6828 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6829 cpu_env);
6830
6831 if (is_signed) {
6832 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6833 tcg_shift, fpst);
6834 } else {
6835 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6836 tcg_shift, fpst);
6837 }
6838
6839 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6840 cpu_env);
6841 tcg_temp_free_i32(tcg_rmode);
6842 tcg_temp_free_i32(tcg_shift);
6843 tcg_temp_free_ptr(fpst);
6844 break;
6845 }
600b828c 6846 case NEON_2RM_VRECPE:
b6d4443a
AB
6847 {
6848 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6849 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6850 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6851 break;
b6d4443a 6852 }
600b828c 6853 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6854 {
6855 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6856 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6857 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6858 break;
c2fb418e 6859 }
600b828c 6860 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6861 {
6862 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6863 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6864 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6865 break;
b6d4443a 6866 }
600b828c 6867 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6868 {
6869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6870 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6871 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6872 break;
c2fb418e 6873 }
600b828c 6874 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6875 gen_vfp_sito(0, 1);
9ee6e8bb 6876 break;
600b828c 6877 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6878 gen_vfp_uito(0, 1);
9ee6e8bb 6879 break;
600b828c 6880 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6881 gen_vfp_tosiz(0, 1);
9ee6e8bb 6882 break;
600b828c 6883 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6884 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6885 break;
6886 default:
600b828c
PM
6887 /* Reserved op values were caught by the
6888 * neon_2rm_sizes[] check earlier.
6889 */
6890 abort();
9ee6e8bb 6891 }
600b828c 6892 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6893 tcg_gen_st_f32(cpu_F0s, cpu_env,
6894 neon_reg_offset(rd, pass));
9ee6e8bb 6895 } else {
dd8fbd78 6896 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6897 }
6898 }
6899 break;
6900 }
6901 } else if ((insn & (1 << 10)) == 0) {
6902 /* VTBL, VTBX. */
56907d77
PM
6903 int n = ((insn >> 8) & 3) + 1;
6904 if ((rn + n) > 32) {
6905 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6906 * helper function running off the end of the register file.
6907 */
6908 return 1;
6909 }
6910 n <<= 3;
9ee6e8bb 6911 if (insn & (1 << 6)) {
8f8e3aa4 6912 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6913 } else {
7d1b0095 6914 tmp = tcg_temp_new_i32();
8f8e3aa4 6915 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6916 }
8f8e3aa4 6917 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6918 tmp4 = tcg_const_i32(rn);
6919 tmp5 = tcg_const_i32(n);
9ef39277 6920 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6921 tcg_temp_free_i32(tmp);
9ee6e8bb 6922 if (insn & (1 << 6)) {
8f8e3aa4 6923 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6924 } else {
7d1b0095 6925 tmp = tcg_temp_new_i32();
8f8e3aa4 6926 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6927 }
8f8e3aa4 6928 tmp3 = neon_load_reg(rm, 1);
9ef39277 6929 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6930 tcg_temp_free_i32(tmp5);
6931 tcg_temp_free_i32(tmp4);
8f8e3aa4 6932 neon_store_reg(rd, 0, tmp2);
3018f259 6933 neon_store_reg(rd, 1, tmp3);
7d1b0095 6934 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6935 } else if ((insn & 0x380) == 0) {
6936 /* VDUP */
133da6aa
JR
6937 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6938 return 1;
6939 }
9ee6e8bb 6940 if (insn & (1 << 19)) {
dd8fbd78 6941 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6942 } else {
dd8fbd78 6943 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6944 }
6945 if (insn & (1 << 16)) {
dd8fbd78 6946 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6947 } else if (insn & (1 << 17)) {
6948 if ((insn >> 18) & 1)
dd8fbd78 6949 gen_neon_dup_high16(tmp);
9ee6e8bb 6950 else
dd8fbd78 6951 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6952 }
6953 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6954 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6955 tcg_gen_mov_i32(tmp2, tmp);
6956 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6957 }
7d1b0095 6958 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6959 } else {
6960 return 1;
6961 }
6962 }
6963 }
6964 return 0;
6965}
6966
0ecb72a5 6967static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6968{
4b6a83fb
PM
6969 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6970 const ARMCPRegInfo *ri;
9ee6e8bb
PB
6971
6972 cpnum = (insn >> 8) & 0xf;
6973 if (arm_feature(env, ARM_FEATURE_XSCALE)
6974 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6975 return 1;
6976
4b6a83fb 6977 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6978 switch (cpnum) {
6979 case 0:
6980 case 1:
6981 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6982 return disas_iwmmxt_insn(env, s, insn);
6983 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6984 return disas_dsp_insn(env, s, insn);
6985 }
6986 return 1;
4b6a83fb
PM
6987 default:
6988 break;
6989 }
6990
6991 /* Otherwise treat as a generic register access */
6992 is64 = (insn & (1 << 25)) == 0;
6993 if (!is64 && ((insn & (1 << 4)) == 0)) {
6994 /* cdp */
6995 return 1;
6996 }
6997
6998 crm = insn & 0xf;
6999 if (is64) {
7000 crn = 0;
7001 opc1 = (insn >> 4) & 0xf;
7002 opc2 = 0;
7003 rt2 = (insn >> 16) & 0xf;
7004 } else {
7005 crn = (insn >> 16) & 0xf;
7006 opc1 = (insn >> 21) & 7;
7007 opc2 = (insn >> 5) & 7;
7008 rt2 = 0;
7009 }
7010 isread = (insn >> 20) & 1;
7011 rt = (insn >> 12) & 0xf;
7012
60322b39 7013 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
7014 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
7015 if (ri) {
7016 /* Check access permissions */
60322b39 7017 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
7018 return 1;
7019 }
7020
f59df3f2
PM
7021 if (ri->accessfn) {
7022 /* Emit code to perform further access permissions checks at
7023 * runtime; this may result in an exception.
7024 */
7025 TCGv_ptr tmpptr;
8bcbf37c
PM
7026 TCGv_i32 tcg_syn;
7027 uint32_t syndrome;
7028
7029 /* Note that since we are an implementation which takes an
7030 * exception on a trapped conditional instruction only if the
7031 * instruction passes its condition code check, we can take
7032 * advantage of the clause in the ARM ARM that allows us to set
7033 * the COND field in the instruction to 0xE in all cases.
7034 * We could fish the actual condition out of the insn (ARM)
7035 * or the condexec bits (Thumb) but it isn't necessary.
7036 */
7037 switch (cpnum) {
7038 case 14:
7039 if (is64) {
7040 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7041 isread, s->thumb);
7042 } else {
7043 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7044 rt, isread, s->thumb);
7045 }
7046 break;
7047 case 15:
7048 if (is64) {
7049 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7050 isread, s->thumb);
7051 } else {
7052 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7053 rt, isread, s->thumb);
7054 }
7055 break;
7056 default:
7057 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7058 * so this can only happen if this is an ARMv7 or earlier CPU,
7059 * in which case the syndrome information won't actually be
7060 * guest visible.
7061 */
7062 assert(!arm_feature(env, ARM_FEATURE_V8));
7063 syndrome = syn_uncategorized();
7064 break;
7065 }
7066
f59df3f2
PM
7067 gen_set_pc_im(s, s->pc);
7068 tmpptr = tcg_const_ptr(ri);
8bcbf37c
PM
7069 tcg_syn = tcg_const_i32(syndrome);
7070 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
f59df3f2 7071 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7072 tcg_temp_free_i32(tcg_syn);
f59df3f2
PM
7073 }
7074
4b6a83fb
PM
7075 /* Handle special cases first */
7076 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7077 case ARM_CP_NOP:
7078 return 0;
7079 case ARM_CP_WFI:
7080 if (isread) {
7081 return 1;
7082 }
eaed129d 7083 gen_set_pc_im(s, s->pc);
4b6a83fb 7084 s->is_jmp = DISAS_WFI;
2bee5105 7085 return 0;
4b6a83fb
PM
7086 default:
7087 break;
7088 }
7089
2452731c
PM
7090 if (use_icount && (ri->type & ARM_CP_IO)) {
7091 gen_io_start();
7092 }
7093
4b6a83fb
PM
7094 if (isread) {
7095 /* Read */
7096 if (is64) {
7097 TCGv_i64 tmp64;
7098 TCGv_i32 tmp;
7099 if (ri->type & ARM_CP_CONST) {
7100 tmp64 = tcg_const_i64(ri->resetvalue);
7101 } else if (ri->readfn) {
7102 TCGv_ptr tmpptr;
4b6a83fb
PM
7103 tmp64 = tcg_temp_new_i64();
7104 tmpptr = tcg_const_ptr(ri);
7105 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7106 tcg_temp_free_ptr(tmpptr);
7107 } else {
7108 tmp64 = tcg_temp_new_i64();
7109 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7110 }
7111 tmp = tcg_temp_new_i32();
7112 tcg_gen_trunc_i64_i32(tmp, tmp64);
7113 store_reg(s, rt, tmp);
7114 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7115 tmp = tcg_temp_new_i32();
4b6a83fb 7116 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 7117 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7118 store_reg(s, rt2, tmp);
7119 } else {
39d5492a 7120 TCGv_i32 tmp;
4b6a83fb
PM
7121 if (ri->type & ARM_CP_CONST) {
7122 tmp = tcg_const_i32(ri->resetvalue);
7123 } else if (ri->readfn) {
7124 TCGv_ptr tmpptr;
4b6a83fb
PM
7125 tmp = tcg_temp_new_i32();
7126 tmpptr = tcg_const_ptr(ri);
7127 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7128 tcg_temp_free_ptr(tmpptr);
7129 } else {
7130 tmp = load_cpu_offset(ri->fieldoffset);
7131 }
7132 if (rt == 15) {
7133 /* Destination register of r15 for 32 bit loads sets
7134 * the condition codes from the high 4 bits of the value
7135 */
7136 gen_set_nzcv(tmp);
7137 tcg_temp_free_i32(tmp);
7138 } else {
7139 store_reg(s, rt, tmp);
7140 }
7141 }
7142 } else {
7143 /* Write */
7144 if (ri->type & ARM_CP_CONST) {
7145 /* If not forbidden by access permissions, treat as WI */
7146 return 0;
7147 }
7148
7149 if (is64) {
39d5492a 7150 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7151 TCGv_i64 tmp64 = tcg_temp_new_i64();
7152 tmplo = load_reg(s, rt);
7153 tmphi = load_reg(s, rt2);
7154 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7155 tcg_temp_free_i32(tmplo);
7156 tcg_temp_free_i32(tmphi);
7157 if (ri->writefn) {
7158 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7159 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7160 tcg_temp_free_ptr(tmpptr);
7161 } else {
7162 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7163 }
7164 tcg_temp_free_i64(tmp64);
7165 } else {
7166 if (ri->writefn) {
39d5492a 7167 TCGv_i32 tmp;
4b6a83fb 7168 TCGv_ptr tmpptr;
4b6a83fb
PM
7169 tmp = load_reg(s, rt);
7170 tmpptr = tcg_const_ptr(ri);
7171 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7172 tcg_temp_free_ptr(tmpptr);
7173 tcg_temp_free_i32(tmp);
7174 } else {
39d5492a 7175 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7176 store_cpu_offset(tmp, ri->fieldoffset);
7177 }
7178 }
2452731c
PM
7179 }
7180
7181 if (use_icount && (ri->type & ARM_CP_IO)) {
7182 /* I/O operations must end the TB here (whether read or write) */
7183 gen_io_end();
7184 gen_lookup_tb(s);
7185 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7186 /* We default to ending the TB on a coprocessor register write,
7187 * but allow this to be suppressed by the register definition
7188 * (usually only necessary to work around guest bugs).
7189 */
2452731c 7190 gen_lookup_tb(s);
4b6a83fb 7191 }
2452731c 7192
4b6a83fb
PM
7193 return 0;
7194 }
7195
626187d8
PM
7196 /* Unknown register; this might be a guest error or a QEMU
7197 * unimplemented feature.
7198 */
7199 if (is64) {
7200 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7201 "64 bit system register cp:%d opc1: %d crm:%d\n",
7202 isread ? "read" : "write", cpnum, opc1, crm);
7203 } else {
7204 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7205 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7206 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7207 }
7208
4a9a539f 7209 return 1;
9ee6e8bb
PB
7210}
7211
5e3f878a
PB
7212
7213/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7214static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7215{
39d5492a 7216 TCGv_i32 tmp;
7d1b0095 7217 tmp = tcg_temp_new_i32();
5e3f878a
PB
7218 tcg_gen_trunc_i64_i32(tmp, val);
7219 store_reg(s, rlow, tmp);
7d1b0095 7220 tmp = tcg_temp_new_i32();
5e3f878a
PB
7221 tcg_gen_shri_i64(val, val, 32);
7222 tcg_gen_trunc_i64_i32(tmp, val);
7223 store_reg(s, rhigh, tmp);
7224}
7225
7226/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7227static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7228{
a7812ae4 7229 TCGv_i64 tmp;
39d5492a 7230 TCGv_i32 tmp2;
5e3f878a 7231
36aa55dc 7232 /* Load value and extend to 64 bits. */
a7812ae4 7233 tmp = tcg_temp_new_i64();
5e3f878a
PB
7234 tmp2 = load_reg(s, rlow);
7235 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7236 tcg_temp_free_i32(tmp2);
5e3f878a 7237 tcg_gen_add_i64(val, val, tmp);
b75263d6 7238 tcg_temp_free_i64(tmp);
5e3f878a
PB
7239}
7240
7241/* load and add a 64-bit value from a register pair. */
a7812ae4 7242static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7243{
a7812ae4 7244 TCGv_i64 tmp;
39d5492a
PM
7245 TCGv_i32 tmpl;
7246 TCGv_i32 tmph;
5e3f878a
PB
7247
7248 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7249 tmpl = load_reg(s, rlow);
7250 tmph = load_reg(s, rhigh);
a7812ae4 7251 tmp = tcg_temp_new_i64();
36aa55dc 7252 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7253 tcg_temp_free_i32(tmpl);
7254 tcg_temp_free_i32(tmph);
5e3f878a 7255 tcg_gen_add_i64(val, val, tmp);
b75263d6 7256 tcg_temp_free_i64(tmp);
5e3f878a
PB
7257}
7258
c9f10124 7259/* Set N and Z flags from hi|lo. */
39d5492a 7260static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7261{
c9f10124
RH
7262 tcg_gen_mov_i32(cpu_NF, hi);
7263 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7264}
7265
426f5abc
PB
7266/* Load/Store exclusive instructions are implemented by remembering
7267 the value/address loaded, and seeing if these are the same
b90372ad 7268 when the store is performed. This should be sufficient to implement
426f5abc
PB
7269 the architecturally mandated semantics, and avoids having to monitor
7270 regular stores.
7271
7272 In system emulation mode only one CPU will be running at once, so
7273 this sequence is effectively atomic. In user emulation mode we
7274 throw an exception and handle the atomic operation elsewhere. */
7275static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7276 TCGv_i32 addr, int size)
426f5abc 7277{
94ee24e7 7278 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
7279
7280 switch (size) {
7281 case 0:
6ce2faf4 7282 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
426f5abc
PB
7283 break;
7284 case 1:
6ce2faf4 7285 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
426f5abc
PB
7286 break;
7287 case 2:
7288 case 3:
6ce2faf4 7289 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
426f5abc
PB
7290 break;
7291 default:
7292 abort();
7293 }
03d05e2d 7294
426f5abc 7295 if (size == 3) {
39d5492a 7296 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d
PM
7297 TCGv_i32 tmp3 = tcg_temp_new_i32();
7298
2c9adbda 7299 tcg_gen_addi_i32(tmp2, addr, 4);
6ce2faf4 7300 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7d1b0095 7301 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7302 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7303 store_reg(s, rt2, tmp3);
7304 } else {
7305 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7306 }
03d05e2d
PM
7307
7308 store_reg(s, rt, tmp);
7309 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7310}
7311
/* Clear the local exclusive monitor: -1 can never match a real
 * (zero-extended 32-bit) address recorded by gen_load_exclusive().
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7316
7317#ifdef CONFIG_USER_ONLY
7318static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7319 TCGv_i32 addr, int size)
426f5abc 7320{
03d05e2d 7321 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
7322 tcg_gen_movi_i32(cpu_exclusive_info,
7323 size | (rd << 4) | (rt << 8) | (rt2 << 12));
d4a2dc67 7324 gen_exception_internal_insn(s, 4, EXCP_STREX);
426f5abc
PB
7325}
7326#else
7327static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7328 TCGv_i32 addr, int size)
426f5abc 7329{
39d5492a 7330 TCGv_i32 tmp;
03d05e2d 7331 TCGv_i64 val64, extaddr;
426f5abc
PB
7332 int done_label;
7333 int fail_label;
7334
7335 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7336 [addr] = {Rt};
7337 {Rd} = 0;
7338 } else {
7339 {Rd} = 1;
7340 } */
7341 fail_label = gen_new_label();
7342 done_label = gen_new_label();
03d05e2d
PM
7343 extaddr = tcg_temp_new_i64();
7344 tcg_gen_extu_i32_i64(extaddr, addr);
7345 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7346 tcg_temp_free_i64(extaddr);
7347
94ee24e7 7348 tmp = tcg_temp_new_i32();
426f5abc
PB
7349 switch (size) {
7350 case 0:
6ce2faf4 7351 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
426f5abc
PB
7352 break;
7353 case 1:
6ce2faf4 7354 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
426f5abc
PB
7355 break;
7356 case 2:
7357 case 3:
6ce2faf4 7358 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
426f5abc
PB
7359 break;
7360 default:
7361 abort();
7362 }
03d05e2d
PM
7363
7364 val64 = tcg_temp_new_i64();
426f5abc 7365 if (size == 3) {
39d5492a 7366 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 7367 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 7368 tcg_gen_addi_i32(tmp2, addr, 4);
6ce2faf4 7369 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7d1b0095 7370 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7371 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7372 tcg_temp_free_i32(tmp3);
7373 } else {
7374 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 7375 }
03d05e2d
PM
7376 tcg_temp_free_i32(tmp);
7377
7378 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7379 tcg_temp_free_i64(val64);
7380
426f5abc
PB
7381 tmp = load_reg(s, rt);
7382 switch (size) {
7383 case 0:
6ce2faf4 7384 gen_aa32_st8(tmp, addr, get_mem_index(s));
426f5abc
PB
7385 break;
7386 case 1:
6ce2faf4 7387 gen_aa32_st16(tmp, addr, get_mem_index(s));
426f5abc
PB
7388 break;
7389 case 2:
7390 case 3:
6ce2faf4 7391 gen_aa32_st32(tmp, addr, get_mem_index(s));
426f5abc
PB
7392 break;
7393 default:
7394 abort();
7395 }
94ee24e7 7396 tcg_temp_free_i32(tmp);
426f5abc
PB
7397 if (size == 3) {
7398 tcg_gen_addi_i32(addr, addr, 4);
7399 tmp = load_reg(s, rt2);
6ce2faf4 7400 gen_aa32_st32(tmp, addr, get_mem_index(s));
94ee24e7 7401 tcg_temp_free_i32(tmp);
426f5abc
PB
7402 }
7403 tcg_gen_movi_i32(cpu_R[rd], 0);
7404 tcg_gen_br(done_label);
7405 gen_set_label(fail_label);
7406 tcg_gen_movi_i32(cpu_R[rd], 1);
7407 gen_set_label(done_label);
03d05e2d 7408 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7409}
7410#endif
7411
81465888
PM
7412/* gen_srs:
7413 * @env: CPUARMState
7414 * @s: DisasContext
7415 * @mode: mode field from insn (which stack to store to)
7416 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7417 * @writeback: true if writeback bit set
7418 *
7419 * Generate code for the SRS (Store Return State) insn.
7420 */
7421static void gen_srs(DisasContext *s,
7422 uint32_t mode, uint32_t amode, bool writeback)
7423{
7424 int32_t offset;
7425 TCGv_i32 addr = tcg_temp_new_i32();
7426 TCGv_i32 tmp = tcg_const_i32(mode);
7427 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7428 tcg_temp_free_i32(tmp);
7429 switch (amode) {
7430 case 0: /* DA */
7431 offset = -4;
7432 break;
7433 case 1: /* IA */
7434 offset = 0;
7435 break;
7436 case 2: /* DB */
7437 offset = -8;
7438 break;
7439 case 3: /* IB */
7440 offset = 4;
7441 break;
7442 default:
7443 abort();
7444 }
7445 tcg_gen_addi_i32(addr, addr, offset);
7446 tmp = load_reg(s, 14);
c1197795 7447 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7448 tcg_temp_free_i32(tmp);
81465888
PM
7449 tmp = load_cpu_field(spsr);
7450 tcg_gen_addi_i32(addr, addr, 4);
c1197795 7451 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7452 tcg_temp_free_i32(tmp);
81465888
PM
7453 if (writeback) {
7454 switch (amode) {
7455 case 0:
7456 offset = -8;
7457 break;
7458 case 1:
7459 offset = 4;
7460 break;
7461 case 2:
7462 offset = -4;
7463 break;
7464 case 3:
7465 offset = 0;
7466 break;
7467 default:
7468 abort();
7469 }
7470 tcg_gen_addi_i32(addr, addr, offset);
7471 tmp = tcg_const_i32(mode);
7472 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7473 tcg_temp_free_i32(tmp);
7474 }
7475 tcg_temp_free_i32(addr);
7476}
7477
0ecb72a5 7478static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7479{
7480 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7481 TCGv_i32 tmp;
7482 TCGv_i32 tmp2;
7483 TCGv_i32 tmp3;
7484 TCGv_i32 addr;
a7812ae4 7485 TCGv_i64 tmp64;
9ee6e8bb 7486
d31dd73e 7487 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7488 s->pc += 4;
7489
7490 /* M variants do not implement ARM mode. */
7491 if (IS_M(env))
7492 goto illegal_op;
7493 cond = insn >> 28;
7494 if (cond == 0xf){
be5e7a76
DES
7495 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7496 * choose to UNDEF. In ARMv5 and above the space is used
7497 * for miscellaneous unconditional instructions.
7498 */
7499 ARCH(5);
7500
9ee6e8bb
PB
7501 /* Unconditional instructions. */
7502 if (((insn >> 25) & 7) == 1) {
7503 /* NEON Data processing. */
7504 if (!arm_feature(env, ARM_FEATURE_NEON))
7505 goto illegal_op;
7506
7507 if (disas_neon_data_insn(env, s, insn))
7508 goto illegal_op;
7509 return;
7510 }
7511 if ((insn & 0x0f100000) == 0x04000000) {
7512 /* NEON load/store. */
7513 if (!arm_feature(env, ARM_FEATURE_NEON))
7514 goto illegal_op;
7515
7516 if (disas_neon_ls_insn(env, s, insn))
7517 goto illegal_op;
7518 return;
7519 }
6a57f3eb
WN
7520 if ((insn & 0x0f000e10) == 0x0e000a00) {
7521 /* VFP. */
7522 if (disas_vfp_insn(env, s, insn)) {
7523 goto illegal_op;
7524 }
7525 return;
7526 }
3d185e5d
PM
7527 if (((insn & 0x0f30f000) == 0x0510f000) ||
7528 ((insn & 0x0f30f010) == 0x0710f000)) {
7529 if ((insn & (1 << 22)) == 0) {
7530 /* PLDW; v7MP */
7531 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7532 goto illegal_op;
7533 }
7534 }
7535 /* Otherwise PLD; v5TE+ */
be5e7a76 7536 ARCH(5TE);
3d185e5d
PM
7537 return;
7538 }
7539 if (((insn & 0x0f70f000) == 0x0450f000) ||
7540 ((insn & 0x0f70f010) == 0x0650f000)) {
7541 ARCH(7);
7542 return; /* PLI; V7 */
7543 }
7544 if (((insn & 0x0f700000) == 0x04100000) ||
7545 ((insn & 0x0f700010) == 0x06100000)) {
7546 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7547 goto illegal_op;
7548 }
7549 return; /* v7MP: Unallocated memory hint: must NOP */
7550 }
7551
7552 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7553 ARCH(6);
7554 /* setend */
10962fd5
PM
7555 if (((insn >> 9) & 1) != s->bswap_code) {
7556 /* Dynamic endianness switching not implemented. */
e0c270d9 7557 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7558 goto illegal_op;
7559 }
7560 return;
7561 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7562 switch ((insn >> 4) & 0xf) {
7563 case 1: /* clrex */
7564 ARCH(6K);
426f5abc 7565 gen_clrex(s);
9ee6e8bb
PB
7566 return;
7567 case 4: /* dsb */
7568 case 5: /* dmb */
7569 case 6: /* isb */
7570 ARCH(7);
7571 /* We don't emulate caches so these are a no-op. */
7572 return;
7573 default:
7574 goto illegal_op;
7575 }
7576 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7577 /* srs */
81465888 7578 if (IS_USER(s)) {
9ee6e8bb 7579 goto illegal_op;
9ee6e8bb 7580 }
81465888
PM
7581 ARCH(6);
7582 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7583 return;
ea825eee 7584 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7585 /* rfe */
c67b6b71 7586 int32_t offset;
9ee6e8bb
PB
7587 if (IS_USER(s))
7588 goto illegal_op;
7589 ARCH(6);
7590 rn = (insn >> 16) & 0xf;
b0109805 7591 addr = load_reg(s, rn);
9ee6e8bb
PB
7592 i = (insn >> 23) & 3;
7593 switch (i) {
b0109805 7594 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7595 case 1: offset = 0; break; /* IA */
7596 case 2: offset = -8; break; /* DB */
b0109805 7597 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7598 default: abort();
7599 }
7600 if (offset)
b0109805
PB
7601 tcg_gen_addi_i32(addr, addr, offset);
7602 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7603 tmp = tcg_temp_new_i32();
6ce2faf4 7604 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7605 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7606 tmp2 = tcg_temp_new_i32();
6ce2faf4 7607 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7608 if (insn & (1 << 21)) {
7609 /* Base writeback. */
7610 switch (i) {
b0109805 7611 case 0: offset = -8; break;
c67b6b71
FN
7612 case 1: offset = 4; break;
7613 case 2: offset = -4; break;
b0109805 7614 case 3: offset = 0; break;
9ee6e8bb
PB
7615 default: abort();
7616 }
7617 if (offset)
b0109805
PB
7618 tcg_gen_addi_i32(addr, addr, offset);
7619 store_reg(s, rn, addr);
7620 } else {
7d1b0095 7621 tcg_temp_free_i32(addr);
9ee6e8bb 7622 }
b0109805 7623 gen_rfe(s, tmp, tmp2);
c67b6b71 7624 return;
9ee6e8bb
PB
7625 } else if ((insn & 0x0e000000) == 0x0a000000) {
7626 /* branch link and change to thumb (blx <offset>) */
7627 int32_t offset;
7628
7629 val = (uint32_t)s->pc;
7d1b0095 7630 tmp = tcg_temp_new_i32();
d9ba4830
PB
7631 tcg_gen_movi_i32(tmp, val);
7632 store_reg(s, 14, tmp);
9ee6e8bb
PB
7633 /* Sign-extend the 24-bit offset */
7634 offset = (((int32_t)insn) << 8) >> 8;
7635 /* offset * 4 + bit24 * 2 + (thumb bit) */
7636 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7637 /* pipeline offset */
7638 val += 4;
be5e7a76 7639 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7640 gen_bx_im(s, val);
9ee6e8bb
PB
7641 return;
7642 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7643 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7644 /* iWMMXt register transfer. */
7645 if (env->cp15.c15_cpar & (1 << 1))
7646 if (!disas_iwmmxt_insn(env, s, insn))
7647 return;
7648 }
7649 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7650 /* Coprocessor double register transfer. */
be5e7a76 7651 ARCH(5TE);
9ee6e8bb
PB
7652 } else if ((insn & 0x0f000010) == 0x0e000010) {
7653 /* Additional coprocessor register transfer. */
7997d92f 7654 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7655 uint32_t mask;
7656 uint32_t val;
7657 /* cps (privileged) */
7658 if (IS_USER(s))
7659 return;
7660 mask = val = 0;
7661 if (insn & (1 << 19)) {
7662 if (insn & (1 << 8))
7663 mask |= CPSR_A;
7664 if (insn & (1 << 7))
7665 mask |= CPSR_I;
7666 if (insn & (1 << 6))
7667 mask |= CPSR_F;
7668 if (insn & (1 << 18))
7669 val |= mask;
7670 }
7997d92f 7671 if (insn & (1 << 17)) {
9ee6e8bb
PB
7672 mask |= CPSR_M;
7673 val |= (insn & 0x1f);
7674 }
7675 if (mask) {
2fbac54b 7676 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7677 }
7678 return;
7679 }
7680 goto illegal_op;
7681 }
7682 if (cond != 0xe) {
7683 /* if not always execute, we generate a conditional jump to
7684 next instruction */
7685 s->condlabel = gen_new_label();
39fb730a 7686 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7687 s->condjmp = 1;
7688 }
7689 if ((insn & 0x0f900000) == 0x03000000) {
7690 if ((insn & (1 << 21)) == 0) {
7691 ARCH(6T2);
7692 rd = (insn >> 12) & 0xf;
7693 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7694 if ((insn & (1 << 22)) == 0) {
7695 /* MOVW */
7d1b0095 7696 tmp = tcg_temp_new_i32();
5e3f878a 7697 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7698 } else {
7699 /* MOVT */
5e3f878a 7700 tmp = load_reg(s, rd);
86831435 7701 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7702 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7703 }
5e3f878a 7704 store_reg(s, rd, tmp);
9ee6e8bb
PB
7705 } else {
7706 if (((insn >> 12) & 0xf) != 0xf)
7707 goto illegal_op;
7708 if (((insn >> 16) & 0xf) == 0) {
7709 gen_nop_hint(s, insn & 0xff);
7710 } else {
7711 /* CPSR = immediate */
7712 val = insn & 0xff;
7713 shift = ((insn >> 8) & 0xf) * 2;
7714 if (shift)
7715 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7716 i = ((insn & (1 << 22)) != 0);
2fbac54b 7717 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7718 goto illegal_op;
7719 }
7720 }
7721 } else if ((insn & 0x0f900000) == 0x01000000
7722 && (insn & 0x00000090) != 0x00000090) {
7723 /* miscellaneous instructions */
7724 op1 = (insn >> 21) & 3;
7725 sh = (insn >> 4) & 0xf;
7726 rm = insn & 0xf;
7727 switch (sh) {
7728 case 0x0: /* move program status register */
7729 if (op1 & 1) {
7730 /* PSR = reg */
2fbac54b 7731 tmp = load_reg(s, rm);
9ee6e8bb 7732 i = ((op1 & 2) != 0);
2fbac54b 7733 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7734 goto illegal_op;
7735 } else {
7736 /* reg = PSR */
7737 rd = (insn >> 12) & 0xf;
7738 if (op1 & 2) {
7739 if (IS_USER(s))
7740 goto illegal_op;
d9ba4830 7741 tmp = load_cpu_field(spsr);
9ee6e8bb 7742 } else {
7d1b0095 7743 tmp = tcg_temp_new_i32();
9ef39277 7744 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7745 }
d9ba4830 7746 store_reg(s, rd, tmp);
9ee6e8bb
PB
7747 }
7748 break;
7749 case 0x1:
7750 if (op1 == 1) {
7751 /* branch/exchange thumb (bx). */
be5e7a76 7752 ARCH(4T);
d9ba4830
PB
7753 tmp = load_reg(s, rm);
7754 gen_bx(s, tmp);
9ee6e8bb
PB
7755 } else if (op1 == 3) {
7756 /* clz */
be5e7a76 7757 ARCH(5);
9ee6e8bb 7758 rd = (insn >> 12) & 0xf;
1497c961
PB
7759 tmp = load_reg(s, rm);
7760 gen_helper_clz(tmp, tmp);
7761 store_reg(s, rd, tmp);
9ee6e8bb
PB
7762 } else {
7763 goto illegal_op;
7764 }
7765 break;
7766 case 0x2:
7767 if (op1 == 1) {
7768 ARCH(5J); /* bxj */
7769 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7770 tmp = load_reg(s, rm);
7771 gen_bx(s, tmp);
9ee6e8bb
PB
7772 } else {
7773 goto illegal_op;
7774 }
7775 break;
7776 case 0x3:
7777 if (op1 != 1)
7778 goto illegal_op;
7779
be5e7a76 7780 ARCH(5);
9ee6e8bb 7781 /* branch link/exchange thumb (blx) */
d9ba4830 7782 tmp = load_reg(s, rm);
7d1b0095 7783 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7784 tcg_gen_movi_i32(tmp2, s->pc);
7785 store_reg(s, 14, tmp2);
7786 gen_bx(s, tmp);
9ee6e8bb 7787 break;
eb0ecd5a
WN
7788 case 0x4:
7789 {
7790 /* crc32/crc32c */
7791 uint32_t c = extract32(insn, 8, 4);
7792
7793 /* Check this CPU supports ARMv8 CRC instructions.
7794 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7795 * Bits 8, 10 and 11 should be zero.
7796 */
7797 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7798 (c & 0xd) != 0) {
7799 goto illegal_op;
7800 }
7801
7802 rn = extract32(insn, 16, 4);
7803 rd = extract32(insn, 12, 4);
7804
7805 tmp = load_reg(s, rn);
7806 tmp2 = load_reg(s, rm);
aa633469
PM
7807 if (op1 == 0) {
7808 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7809 } else if (op1 == 1) {
7810 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7811 }
eb0ecd5a
WN
7812 tmp3 = tcg_const_i32(1 << op1);
7813 if (c & 0x2) {
7814 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7815 } else {
7816 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7817 }
7818 tcg_temp_free_i32(tmp2);
7819 tcg_temp_free_i32(tmp3);
7820 store_reg(s, rd, tmp);
7821 break;
7822 }
9ee6e8bb 7823 case 0x5: /* saturating add/subtract */
be5e7a76 7824 ARCH(5TE);
9ee6e8bb
PB
7825 rd = (insn >> 12) & 0xf;
7826 rn = (insn >> 16) & 0xf;
b40d0353 7827 tmp = load_reg(s, rm);
5e3f878a 7828 tmp2 = load_reg(s, rn);
9ee6e8bb 7829 if (op1 & 2)
9ef39277 7830 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7831 if (op1 & 1)
9ef39277 7832 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7833 else
9ef39277 7834 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7835 tcg_temp_free_i32(tmp2);
5e3f878a 7836 store_reg(s, rd, tmp);
9ee6e8bb 7837 break;
49e14940 7838 case 7:
d4a2dc67
PM
7839 {
7840 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
49e14940
AL
7841 /* SMC instruction (op1 == 3)
7842 and undefined instructions (op1 == 0 || op1 == 2)
7843 will trap */
7844 if (op1 != 1) {
7845 goto illegal_op;
7846 }
7847 /* bkpt */
be5e7a76 7848 ARCH(5);
d4a2dc67 7849 gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
9ee6e8bb 7850 break;
d4a2dc67 7851 }
9ee6e8bb
PB
7852 case 0x8: /* signed multiply */
7853 case 0xa:
7854 case 0xc:
7855 case 0xe:
be5e7a76 7856 ARCH(5TE);
9ee6e8bb
PB
7857 rs = (insn >> 8) & 0xf;
7858 rn = (insn >> 12) & 0xf;
7859 rd = (insn >> 16) & 0xf;
7860 if (op1 == 1) {
7861 /* (32 * 16) >> 16 */
5e3f878a
PB
7862 tmp = load_reg(s, rm);
7863 tmp2 = load_reg(s, rs);
9ee6e8bb 7864 if (sh & 4)
5e3f878a 7865 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7866 else
5e3f878a 7867 gen_sxth(tmp2);
a7812ae4
PB
7868 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7869 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7870 tmp = tcg_temp_new_i32();
a7812ae4 7871 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7872 tcg_temp_free_i64(tmp64);
9ee6e8bb 7873 if ((sh & 2) == 0) {
5e3f878a 7874 tmp2 = load_reg(s, rn);
9ef39277 7875 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7876 tcg_temp_free_i32(tmp2);
9ee6e8bb 7877 }
5e3f878a 7878 store_reg(s, rd, tmp);
9ee6e8bb
PB
7879 } else {
7880 /* 16 * 16 */
5e3f878a
PB
7881 tmp = load_reg(s, rm);
7882 tmp2 = load_reg(s, rs);
7883 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7884 tcg_temp_free_i32(tmp2);
9ee6e8bb 7885 if (op1 == 2) {
a7812ae4
PB
7886 tmp64 = tcg_temp_new_i64();
7887 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7888 tcg_temp_free_i32(tmp);
a7812ae4
PB
7889 gen_addq(s, tmp64, rn, rd);
7890 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7891 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7892 } else {
7893 if (op1 == 0) {
5e3f878a 7894 tmp2 = load_reg(s, rn);
9ef39277 7895 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7896 tcg_temp_free_i32(tmp2);
9ee6e8bb 7897 }
5e3f878a 7898 store_reg(s, rd, tmp);
9ee6e8bb
PB
7899 }
7900 }
7901 break;
7902 default:
7903 goto illegal_op;
7904 }
7905 } else if (((insn & 0x0e000000) == 0 &&
7906 (insn & 0x00000090) != 0x90) ||
7907 ((insn & 0x0e000000) == (1 << 25))) {
7908 int set_cc, logic_cc, shiftop;
7909
7910 op1 = (insn >> 21) & 0xf;
7911 set_cc = (insn >> 20) & 1;
7912 logic_cc = table_logic_cc[op1] & set_cc;
7913
7914 /* data processing instruction */
7915 if (insn & (1 << 25)) {
7916 /* immediate operand */
7917 val = insn & 0xff;
7918 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7919 if (shift) {
9ee6e8bb 7920 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7921 }
7d1b0095 7922 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7923 tcg_gen_movi_i32(tmp2, val);
7924 if (logic_cc && shift) {
7925 gen_set_CF_bit31(tmp2);
7926 }
9ee6e8bb
PB
7927 } else {
7928 /* register */
7929 rm = (insn) & 0xf;
e9bb4aa9 7930 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7931 shiftop = (insn >> 5) & 3;
7932 if (!(insn & (1 << 4))) {
7933 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7934 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7935 } else {
7936 rs = (insn >> 8) & 0xf;
8984bd2e 7937 tmp = load_reg(s, rs);
e9bb4aa9 7938 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7939 }
7940 }
7941 if (op1 != 0x0f && op1 != 0x0d) {
7942 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7943 tmp = load_reg(s, rn);
7944 } else {
39d5492a 7945 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7946 }
7947 rd = (insn >> 12) & 0xf;
7948 switch(op1) {
7949 case 0x00:
e9bb4aa9
JR
7950 tcg_gen_and_i32(tmp, tmp, tmp2);
7951 if (logic_cc) {
7952 gen_logic_CC(tmp);
7953 }
21aeb343 7954 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7955 break;
7956 case 0x01:
e9bb4aa9
JR
7957 tcg_gen_xor_i32(tmp, tmp, tmp2);
7958 if (logic_cc) {
7959 gen_logic_CC(tmp);
7960 }
21aeb343 7961 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7962 break;
7963 case 0x02:
7964 if (set_cc && rd == 15) {
7965 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7966 if (IS_USER(s)) {
9ee6e8bb 7967 goto illegal_op;
e9bb4aa9 7968 }
72485ec4 7969 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7970 gen_exception_return(s, tmp);
9ee6e8bb 7971 } else {
e9bb4aa9 7972 if (set_cc) {
72485ec4 7973 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7974 } else {
7975 tcg_gen_sub_i32(tmp, tmp, tmp2);
7976 }
21aeb343 7977 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7978 }
7979 break;
7980 case 0x03:
e9bb4aa9 7981 if (set_cc) {
72485ec4 7982 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7983 } else {
7984 tcg_gen_sub_i32(tmp, tmp2, tmp);
7985 }
21aeb343 7986 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7987 break;
7988 case 0x04:
e9bb4aa9 7989 if (set_cc) {
72485ec4 7990 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7991 } else {
7992 tcg_gen_add_i32(tmp, tmp, tmp2);
7993 }
21aeb343 7994 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7995 break;
7996 case 0x05:
e9bb4aa9 7997 if (set_cc) {
49b4c31e 7998 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7999 } else {
8000 gen_add_carry(tmp, tmp, tmp2);
8001 }
21aeb343 8002 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8003 break;
8004 case 0x06:
e9bb4aa9 8005 if (set_cc) {
2de68a49 8006 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8007 } else {
8008 gen_sub_carry(tmp, tmp, tmp2);
8009 }
21aeb343 8010 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8011 break;
8012 case 0x07:
e9bb4aa9 8013 if (set_cc) {
2de68a49 8014 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8015 } else {
8016 gen_sub_carry(tmp, tmp2, tmp);
8017 }
21aeb343 8018 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8019 break;
8020 case 0x08:
8021 if (set_cc) {
e9bb4aa9
JR
8022 tcg_gen_and_i32(tmp, tmp, tmp2);
8023 gen_logic_CC(tmp);
9ee6e8bb 8024 }
7d1b0095 8025 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8026 break;
8027 case 0x09:
8028 if (set_cc) {
e9bb4aa9
JR
8029 tcg_gen_xor_i32(tmp, tmp, tmp2);
8030 gen_logic_CC(tmp);
9ee6e8bb 8031 }
7d1b0095 8032 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8033 break;
8034 case 0x0a:
8035 if (set_cc) {
72485ec4 8036 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8037 }
7d1b0095 8038 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8039 break;
8040 case 0x0b:
8041 if (set_cc) {
72485ec4 8042 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8043 }
7d1b0095 8044 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8045 break;
8046 case 0x0c:
e9bb4aa9
JR
8047 tcg_gen_or_i32(tmp, tmp, tmp2);
8048 if (logic_cc) {
8049 gen_logic_CC(tmp);
8050 }
21aeb343 8051 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8052 break;
8053 case 0x0d:
8054 if (logic_cc && rd == 15) {
8055 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8056 if (IS_USER(s)) {
9ee6e8bb 8057 goto illegal_op;
e9bb4aa9
JR
8058 }
8059 gen_exception_return(s, tmp2);
9ee6e8bb 8060 } else {
e9bb4aa9
JR
8061 if (logic_cc) {
8062 gen_logic_CC(tmp2);
8063 }
21aeb343 8064 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8065 }
8066 break;
8067 case 0x0e:
f669df27 8068 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8069 if (logic_cc) {
8070 gen_logic_CC(tmp);
8071 }
21aeb343 8072 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8073 break;
8074 default:
8075 case 0x0f:
e9bb4aa9
JR
8076 tcg_gen_not_i32(tmp2, tmp2);
8077 if (logic_cc) {
8078 gen_logic_CC(tmp2);
8079 }
21aeb343 8080 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8081 break;
8082 }
e9bb4aa9 8083 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8084 tcg_temp_free_i32(tmp2);
e9bb4aa9 8085 }
9ee6e8bb
PB
8086 } else {
8087 /* other instructions */
8088 op1 = (insn >> 24) & 0xf;
8089 switch(op1) {
8090 case 0x0:
8091 case 0x1:
8092 /* multiplies, extra load/stores */
8093 sh = (insn >> 5) & 3;
8094 if (sh == 0) {
8095 if (op1 == 0x0) {
8096 rd = (insn >> 16) & 0xf;
8097 rn = (insn >> 12) & 0xf;
8098 rs = (insn >> 8) & 0xf;
8099 rm = (insn) & 0xf;
8100 op1 = (insn >> 20) & 0xf;
8101 switch (op1) {
8102 case 0: case 1: case 2: case 3: case 6:
8103 /* 32 bit mul */
5e3f878a
PB
8104 tmp = load_reg(s, rs);
8105 tmp2 = load_reg(s, rm);
8106 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8107 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8108 if (insn & (1 << 22)) {
8109 /* Subtract (mls) */
8110 ARCH(6T2);
5e3f878a
PB
8111 tmp2 = load_reg(s, rn);
8112 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8113 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8114 } else if (insn & (1 << 21)) {
8115 /* Add */
5e3f878a
PB
8116 tmp2 = load_reg(s, rn);
8117 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8118 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8119 }
8120 if (insn & (1 << 20))
5e3f878a
PB
8121 gen_logic_CC(tmp);
8122 store_reg(s, rd, tmp);
9ee6e8bb 8123 break;
8aac08b1
AJ
8124 case 4:
8125 /* 64 bit mul double accumulate (UMAAL) */
8126 ARCH(6);
8127 tmp = load_reg(s, rs);
8128 tmp2 = load_reg(s, rm);
8129 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8130 gen_addq_lo(s, tmp64, rn);
8131 gen_addq_lo(s, tmp64, rd);
8132 gen_storeq_reg(s, rn, rd, tmp64);
8133 tcg_temp_free_i64(tmp64);
8134 break;
8135 case 8: case 9: case 10: case 11:
8136 case 12: case 13: case 14: case 15:
8137 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8138 tmp = load_reg(s, rs);
8139 tmp2 = load_reg(s, rm);
8aac08b1 8140 if (insn & (1 << 22)) {
c9f10124 8141 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8142 } else {
c9f10124 8143 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8144 }
8145 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8146 TCGv_i32 al = load_reg(s, rn);
8147 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8148 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8149 tcg_temp_free_i32(al);
8150 tcg_temp_free_i32(ah);
9ee6e8bb 8151 }
8aac08b1 8152 if (insn & (1 << 20)) {
c9f10124 8153 gen_logicq_cc(tmp, tmp2);
8aac08b1 8154 }
c9f10124
RH
8155 store_reg(s, rn, tmp);
8156 store_reg(s, rd, tmp2);
9ee6e8bb 8157 break;
8aac08b1
AJ
8158 default:
8159 goto illegal_op;
9ee6e8bb
PB
8160 }
8161 } else {
8162 rn = (insn >> 16) & 0xf;
8163 rd = (insn >> 12) & 0xf;
8164 if (insn & (1 << 23)) {
8165 /* load/store exclusive */
2359bf80 8166 int op2 = (insn >> 8) & 3;
86753403 8167 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8168
8169 switch (op2) {
8170 case 0: /* lda/stl */
8171 if (op1 == 1) {
8172 goto illegal_op;
8173 }
8174 ARCH(8);
8175 break;
8176 case 1: /* reserved */
8177 goto illegal_op;
8178 case 2: /* ldaex/stlex */
8179 ARCH(8);
8180 break;
8181 case 3: /* ldrex/strex */
8182 if (op1) {
8183 ARCH(6K);
8184 } else {
8185 ARCH(6);
8186 }
8187 break;
8188 }
8189
3174f8e9 8190 addr = tcg_temp_local_new_i32();
98a46317 8191 load_reg_var(s, addr, rn);
2359bf80
MR
8192
8193 /* Since the emulation does not have barriers,
8194 the acquire/release semantics need no special
8195 handling */
8196 if (op2 == 0) {
8197 if (insn & (1 << 20)) {
8198 tmp = tcg_temp_new_i32();
8199 switch (op1) {
8200 case 0: /* lda */
6ce2faf4 8201 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8202 break;
8203 case 2: /* ldab */
6ce2faf4 8204 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8205 break;
8206 case 3: /* ldah */
6ce2faf4 8207 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8208 break;
8209 default:
8210 abort();
8211 }
8212 store_reg(s, rd, tmp);
8213 } else {
8214 rm = insn & 0xf;
8215 tmp = load_reg(s, rm);
8216 switch (op1) {
8217 case 0: /* stl */
6ce2faf4 8218 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8219 break;
8220 case 2: /* stlb */
6ce2faf4 8221 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8222 break;
8223 case 3: /* stlh */
6ce2faf4 8224 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8225 break;
8226 default:
8227 abort();
8228 }
8229 tcg_temp_free_i32(tmp);
8230 }
8231 } else if (insn & (1 << 20)) {
86753403
PB
8232 switch (op1) {
8233 case 0: /* ldrex */
426f5abc 8234 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8235 break;
8236 case 1: /* ldrexd */
426f5abc 8237 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8238 break;
8239 case 2: /* ldrexb */
426f5abc 8240 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8241 break;
8242 case 3: /* ldrexh */
426f5abc 8243 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8244 break;
8245 default:
8246 abort();
8247 }
9ee6e8bb
PB
8248 } else {
8249 rm = insn & 0xf;
86753403
PB
8250 switch (op1) {
8251 case 0: /* strex */
426f5abc 8252 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8253 break;
8254 case 1: /* strexd */
502e64fe 8255 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8256 break;
8257 case 2: /* strexb */
426f5abc 8258 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8259 break;
8260 case 3: /* strexh */
426f5abc 8261 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8262 break;
8263 default:
8264 abort();
8265 }
9ee6e8bb 8266 }
39d5492a 8267 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8268 } else {
8269 /* SWP instruction */
8270 rm = (insn) & 0xf;
8271
8984bd2e
PB
8272 /* ??? This is not really atomic. However we know
8273 we never have multiple CPUs running in parallel,
8274 so it is good enough. */
8275 addr = load_reg(s, rn);
8276 tmp = load_reg(s, rm);
5a839c0d 8277 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8278 if (insn & (1 << 22)) {
6ce2faf4
EI
8279 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8280 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8281 } else {
6ce2faf4
EI
8282 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8283 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8284 }
5a839c0d 8285 tcg_temp_free_i32(tmp);
7d1b0095 8286 tcg_temp_free_i32(addr);
8984bd2e 8287 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8288 }
8289 }
8290 } else {
8291 int address_offset;
8292 int load;
8293 /* Misc load/store */
8294 rn = (insn >> 16) & 0xf;
8295 rd = (insn >> 12) & 0xf;
b0109805 8296 addr = load_reg(s, rn);
9ee6e8bb 8297 if (insn & (1 << 24))
b0109805 8298 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8299 address_offset = 0;
8300 if (insn & (1 << 20)) {
8301 /* load */
5a839c0d 8302 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8303 switch(sh) {
8304 case 1:
6ce2faf4 8305 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8306 break;
8307 case 2:
6ce2faf4 8308 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8309 break;
8310 default:
8311 case 3:
6ce2faf4 8312 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8313 break;
8314 }
8315 load = 1;
8316 } else if (sh & 2) {
be5e7a76 8317 ARCH(5TE);
9ee6e8bb
PB
8318 /* doubleword */
8319 if (sh & 1) {
8320 /* store */
b0109805 8321 tmp = load_reg(s, rd);
6ce2faf4 8322 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8323 tcg_temp_free_i32(tmp);
b0109805
PB
8324 tcg_gen_addi_i32(addr, addr, 4);
8325 tmp = load_reg(s, rd + 1);
6ce2faf4 8326 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8327 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8328 load = 0;
8329 } else {
8330 /* load */
5a839c0d 8331 tmp = tcg_temp_new_i32();
6ce2faf4 8332 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8333 store_reg(s, rd, tmp);
8334 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8335 tmp = tcg_temp_new_i32();
6ce2faf4 8336 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8337 rd++;
8338 load = 1;
8339 }
8340 address_offset = -4;
8341 } else {
8342 /* store */
b0109805 8343 tmp = load_reg(s, rd);
6ce2faf4 8344 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8345 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8346 load = 0;
8347 }
8348 /* Perform base writeback before the loaded value to
8349 ensure correct behavior with overlapping index registers.
8350 ldrd with base writeback is is undefined if the
8351 destination and index registers overlap. */
8352 if (!(insn & (1 << 24))) {
b0109805
PB
8353 gen_add_datah_offset(s, insn, address_offset, addr);
8354 store_reg(s, rn, addr);
9ee6e8bb
PB
8355 } else if (insn & (1 << 21)) {
8356 if (address_offset)
b0109805
PB
8357 tcg_gen_addi_i32(addr, addr, address_offset);
8358 store_reg(s, rn, addr);
8359 } else {
7d1b0095 8360 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8361 }
8362 if (load) {
8363 /* Complete the load. */
b0109805 8364 store_reg(s, rd, tmp);
9ee6e8bb
PB
8365 }
8366 }
8367 break;
8368 case 0x4:
8369 case 0x5:
8370 goto do_ldst;
8371 case 0x6:
8372 case 0x7:
8373 if (insn & (1 << 4)) {
8374 ARCH(6);
8375 /* Armv6 Media instructions. */
8376 rm = insn & 0xf;
8377 rn = (insn >> 16) & 0xf;
2c0262af 8378 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8379 rs = (insn >> 8) & 0xf;
8380 switch ((insn >> 23) & 3) {
8381 case 0: /* Parallel add/subtract. */
8382 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8383 tmp = load_reg(s, rn);
8384 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8385 sh = (insn >> 5) & 7;
8386 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8387 goto illegal_op;
6ddbc6e4 8388 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8389 tcg_temp_free_i32(tmp2);
6ddbc6e4 8390 store_reg(s, rd, tmp);
9ee6e8bb
PB
8391 break;
8392 case 1:
8393 if ((insn & 0x00700020) == 0) {
6c95676b 8394 /* Halfword pack. */
3670669c
PB
8395 tmp = load_reg(s, rn);
8396 tmp2 = load_reg(s, rm);
9ee6e8bb 8397 shift = (insn >> 7) & 0x1f;
3670669c
PB
8398 if (insn & (1 << 6)) {
8399 /* pkhtb */
22478e79
AZ
8400 if (shift == 0)
8401 shift = 31;
8402 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8403 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8404 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8405 } else {
8406 /* pkhbt */
22478e79
AZ
8407 if (shift)
8408 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8409 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8410 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8411 }
8412 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8413 tcg_temp_free_i32(tmp2);
3670669c 8414 store_reg(s, rd, tmp);
9ee6e8bb
PB
8415 } else if ((insn & 0x00200020) == 0x00200000) {
8416 /* [us]sat */
6ddbc6e4 8417 tmp = load_reg(s, rm);
9ee6e8bb
PB
8418 shift = (insn >> 7) & 0x1f;
8419 if (insn & (1 << 6)) {
8420 if (shift == 0)
8421 shift = 31;
6ddbc6e4 8422 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8423 } else {
6ddbc6e4 8424 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8425 }
8426 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8427 tmp2 = tcg_const_i32(sh);
8428 if (insn & (1 << 22))
9ef39277 8429 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8430 else
9ef39277 8431 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8432 tcg_temp_free_i32(tmp2);
6ddbc6e4 8433 store_reg(s, rd, tmp);
9ee6e8bb
PB
8434 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8435 /* [us]sat16 */
6ddbc6e4 8436 tmp = load_reg(s, rm);
9ee6e8bb 8437 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8438 tmp2 = tcg_const_i32(sh);
8439 if (insn & (1 << 22))
9ef39277 8440 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8441 else
9ef39277 8442 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8443 tcg_temp_free_i32(tmp2);
6ddbc6e4 8444 store_reg(s, rd, tmp);
9ee6e8bb
PB
8445 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8446 /* Select bytes. */
6ddbc6e4
PB
8447 tmp = load_reg(s, rn);
8448 tmp2 = load_reg(s, rm);
7d1b0095 8449 tmp3 = tcg_temp_new_i32();
0ecb72a5 8450 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8451 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8452 tcg_temp_free_i32(tmp3);
8453 tcg_temp_free_i32(tmp2);
6ddbc6e4 8454 store_reg(s, rd, tmp);
9ee6e8bb 8455 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8456 tmp = load_reg(s, rm);
9ee6e8bb 8457 shift = (insn >> 10) & 3;
1301f322 8458 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8459 rotate, a shift is sufficient. */
8460 if (shift != 0)
f669df27 8461 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8462 op1 = (insn >> 20) & 7;
8463 switch (op1) {
5e3f878a
PB
8464 case 0: gen_sxtb16(tmp); break;
8465 case 2: gen_sxtb(tmp); break;
8466 case 3: gen_sxth(tmp); break;
8467 case 4: gen_uxtb16(tmp); break;
8468 case 6: gen_uxtb(tmp); break;
8469 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8470 default: goto illegal_op;
8471 }
8472 if (rn != 15) {
5e3f878a 8473 tmp2 = load_reg(s, rn);
9ee6e8bb 8474 if ((op1 & 3) == 0) {
5e3f878a 8475 gen_add16(tmp, tmp2);
9ee6e8bb 8476 } else {
5e3f878a 8477 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8478 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8479 }
8480 }
6c95676b 8481 store_reg(s, rd, tmp);
9ee6e8bb
PB
8482 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8483 /* rev */
b0109805 8484 tmp = load_reg(s, rm);
9ee6e8bb
PB
8485 if (insn & (1 << 22)) {
8486 if (insn & (1 << 7)) {
b0109805 8487 gen_revsh(tmp);
9ee6e8bb
PB
8488 } else {
8489 ARCH(6T2);
b0109805 8490 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8491 }
8492 } else {
8493 if (insn & (1 << 7))
b0109805 8494 gen_rev16(tmp);
9ee6e8bb 8495 else
66896cb8 8496 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8497 }
b0109805 8498 store_reg(s, rd, tmp);
9ee6e8bb
PB
8499 } else {
8500 goto illegal_op;
8501 }
8502 break;
8503 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8504 switch ((insn >> 20) & 0x7) {
8505 case 5:
8506 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8507 /* op2 not 00x or 11x : UNDEF */
8508 goto illegal_op;
8509 }
838fa72d
AJ
8510 /* Signed multiply most significant [accumulate].
8511 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8512 tmp = load_reg(s, rm);
8513 tmp2 = load_reg(s, rs);
a7812ae4 8514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8515
955a7dd5 8516 if (rd != 15) {
838fa72d 8517 tmp = load_reg(s, rd);
9ee6e8bb 8518 if (insn & (1 << 6)) {
838fa72d 8519 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8520 } else {
838fa72d 8521 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8522 }
8523 }
838fa72d
AJ
8524 if (insn & (1 << 5)) {
8525 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8526 }
8527 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8528 tmp = tcg_temp_new_i32();
838fa72d
AJ
8529 tcg_gen_trunc_i64_i32(tmp, tmp64);
8530 tcg_temp_free_i64(tmp64);
955a7dd5 8531 store_reg(s, rn, tmp);
41e9564d
PM
8532 break;
8533 case 0:
8534 case 4:
8535 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8536 if (insn & (1 << 7)) {
8537 goto illegal_op;
8538 }
8539 tmp = load_reg(s, rm);
8540 tmp2 = load_reg(s, rs);
9ee6e8bb 8541 if (insn & (1 << 5))
5e3f878a
PB
8542 gen_swap_half(tmp2);
8543 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8544 if (insn & (1 << 22)) {
5e3f878a 8545 /* smlald, smlsld */
33bbd75a
PC
8546 TCGv_i64 tmp64_2;
8547
a7812ae4 8548 tmp64 = tcg_temp_new_i64();
33bbd75a 8549 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8550 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8551 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8552 tcg_temp_free_i32(tmp);
33bbd75a
PC
8553 tcg_temp_free_i32(tmp2);
8554 if (insn & (1 << 6)) {
8555 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8556 } else {
8557 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8558 }
8559 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8560 gen_addq(s, tmp64, rd, rn);
8561 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8562 tcg_temp_free_i64(tmp64);
9ee6e8bb 8563 } else {
5e3f878a 8564 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8565 if (insn & (1 << 6)) {
8566 /* This subtraction cannot overflow. */
8567 tcg_gen_sub_i32(tmp, tmp, tmp2);
8568 } else {
8569 /* This addition cannot overflow 32 bits;
8570 * however it may overflow considered as a
8571 * signed operation, in which case we must set
8572 * the Q flag.
8573 */
8574 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8575 }
8576 tcg_temp_free_i32(tmp2);
22478e79 8577 if (rd != 15)
9ee6e8bb 8578 {
22478e79 8579 tmp2 = load_reg(s, rd);
9ef39277 8580 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8581 tcg_temp_free_i32(tmp2);
9ee6e8bb 8582 }
22478e79 8583 store_reg(s, rn, tmp);
9ee6e8bb 8584 }
41e9564d 8585 break;
b8b8ea05
PM
8586 case 1:
8587 case 3:
8588 /* SDIV, UDIV */
8589 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8590 goto illegal_op;
8591 }
8592 if (((insn >> 5) & 7) || (rd != 15)) {
8593 goto illegal_op;
8594 }
8595 tmp = load_reg(s, rm);
8596 tmp2 = load_reg(s, rs);
8597 if (insn & (1 << 21)) {
8598 gen_helper_udiv(tmp, tmp, tmp2);
8599 } else {
8600 gen_helper_sdiv(tmp, tmp, tmp2);
8601 }
8602 tcg_temp_free_i32(tmp2);
8603 store_reg(s, rn, tmp);
8604 break;
41e9564d
PM
8605 default:
8606 goto illegal_op;
9ee6e8bb
PB
8607 }
8608 break;
8609 case 3:
8610 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8611 switch (op1) {
8612 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8613 ARCH(6);
8614 tmp = load_reg(s, rm);
8615 tmp2 = load_reg(s, rs);
8616 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8617 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8618 if (rd != 15) {
8619 tmp2 = load_reg(s, rd);
6ddbc6e4 8620 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8621 tcg_temp_free_i32(tmp2);
9ee6e8bb 8622 }
ded9d295 8623 store_reg(s, rn, tmp);
9ee6e8bb
PB
8624 break;
8625 case 0x20: case 0x24: case 0x28: case 0x2c:
8626 /* Bitfield insert/clear. */
8627 ARCH(6T2);
8628 shift = (insn >> 7) & 0x1f;
8629 i = (insn >> 16) & 0x1f;
8630 i = i + 1 - shift;
8631 if (rm == 15) {
7d1b0095 8632 tmp = tcg_temp_new_i32();
5e3f878a 8633 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8634 } else {
5e3f878a 8635 tmp = load_reg(s, rm);
9ee6e8bb
PB
8636 }
8637 if (i != 32) {
5e3f878a 8638 tmp2 = load_reg(s, rd);
d593c48e 8639 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8640 tcg_temp_free_i32(tmp2);
9ee6e8bb 8641 }
5e3f878a 8642 store_reg(s, rd, tmp);
9ee6e8bb
PB
8643 break;
8644 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8645 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8646 ARCH(6T2);
5e3f878a 8647 tmp = load_reg(s, rm);
9ee6e8bb
PB
8648 shift = (insn >> 7) & 0x1f;
8649 i = ((insn >> 16) & 0x1f) + 1;
8650 if (shift + i > 32)
8651 goto illegal_op;
8652 if (i < 32) {
8653 if (op1 & 0x20) {
5e3f878a 8654 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8655 } else {
5e3f878a 8656 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8657 }
8658 }
5e3f878a 8659 store_reg(s, rd, tmp);
9ee6e8bb
PB
8660 break;
8661 default:
8662 goto illegal_op;
8663 }
8664 break;
8665 }
8666 break;
8667 }
8668 do_ldst:
8669 /* Check for undefined extension instructions
8670 * per the ARM Bible IE:
8671 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8672 */
8673 sh = (0xf << 20) | (0xf << 4);
8674 if (op1 == 0x7 && ((insn & sh) == sh))
8675 {
8676 goto illegal_op;
8677 }
8678 /* load/store byte/word */
8679 rn = (insn >> 16) & 0xf;
8680 rd = (insn >> 12) & 0xf;
b0109805 8681 tmp2 = load_reg(s, rn);
a99caa48
PM
8682 if ((insn & 0x01200000) == 0x00200000) {
8683 /* ldrt/strt */
8684 i = MMU_USER_IDX;
8685 } else {
8686 i = get_mem_index(s);
8687 }
9ee6e8bb 8688 if (insn & (1 << 24))
b0109805 8689 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8690 if (insn & (1 << 20)) {
8691 /* load */
5a839c0d 8692 tmp = tcg_temp_new_i32();
9ee6e8bb 8693 if (insn & (1 << 22)) {
08307563 8694 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8695 } else {
08307563 8696 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8697 }
9ee6e8bb
PB
8698 } else {
8699 /* store */
b0109805 8700 tmp = load_reg(s, rd);
5a839c0d 8701 if (insn & (1 << 22)) {
08307563 8702 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8703 } else {
08307563 8704 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8705 }
8706 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8707 }
8708 if (!(insn & (1 << 24))) {
b0109805
PB
8709 gen_add_data_offset(s, insn, tmp2);
8710 store_reg(s, rn, tmp2);
8711 } else if (insn & (1 << 21)) {
8712 store_reg(s, rn, tmp2);
8713 } else {
7d1b0095 8714 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8715 }
8716 if (insn & (1 << 20)) {
8717 /* Complete the load. */
be5e7a76 8718 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8719 }
8720 break;
8721 case 0x08:
8722 case 0x09:
8723 {
8724 int j, n, user, loaded_base;
39d5492a 8725 TCGv_i32 loaded_var;
9ee6e8bb
PB
8726 /* load/store multiple words */
8727 /* XXX: store correct base if write back */
8728 user = 0;
8729 if (insn & (1 << 22)) {
8730 if (IS_USER(s))
8731 goto illegal_op; /* only usable in supervisor mode */
8732
8733 if ((insn & (1 << 15)) == 0)
8734 user = 1;
8735 }
8736 rn = (insn >> 16) & 0xf;
b0109805 8737 addr = load_reg(s, rn);
9ee6e8bb
PB
8738
8739 /* compute total size */
8740 loaded_base = 0;
39d5492a 8741 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8742 n = 0;
8743 for(i=0;i<16;i++) {
8744 if (insn & (1 << i))
8745 n++;
8746 }
8747 /* XXX: test invalid n == 0 case ? */
8748 if (insn & (1 << 23)) {
8749 if (insn & (1 << 24)) {
8750 /* pre increment */
b0109805 8751 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8752 } else {
8753 /* post increment */
8754 }
8755 } else {
8756 if (insn & (1 << 24)) {
8757 /* pre decrement */
b0109805 8758 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8759 } else {
8760 /* post decrement */
8761 if (n != 1)
b0109805 8762 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8763 }
8764 }
8765 j = 0;
8766 for(i=0;i<16;i++) {
8767 if (insn & (1 << i)) {
8768 if (insn & (1 << 20)) {
8769 /* load */
5a839c0d 8770 tmp = tcg_temp_new_i32();
6ce2faf4 8771 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8772 if (user) {
b75263d6 8773 tmp2 = tcg_const_i32(i);
1ce94f81 8774 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8775 tcg_temp_free_i32(tmp2);
7d1b0095 8776 tcg_temp_free_i32(tmp);
9ee6e8bb 8777 } else if (i == rn) {
b0109805 8778 loaded_var = tmp;
9ee6e8bb
PB
8779 loaded_base = 1;
8780 } else {
be5e7a76 8781 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8782 }
8783 } else {
8784 /* store */
8785 if (i == 15) {
8786 /* special case: r15 = PC + 8 */
8787 val = (long)s->pc + 4;
7d1b0095 8788 tmp = tcg_temp_new_i32();
b0109805 8789 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8790 } else if (user) {
7d1b0095 8791 tmp = tcg_temp_new_i32();
b75263d6 8792 tmp2 = tcg_const_i32(i);
9ef39277 8793 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8794 tcg_temp_free_i32(tmp2);
9ee6e8bb 8795 } else {
b0109805 8796 tmp = load_reg(s, i);
9ee6e8bb 8797 }
6ce2faf4 8798 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8799 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8800 }
8801 j++;
8802 /* no need to add after the last transfer */
8803 if (j != n)
b0109805 8804 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8805 }
8806 }
8807 if (insn & (1 << 21)) {
8808 /* write back */
8809 if (insn & (1 << 23)) {
8810 if (insn & (1 << 24)) {
8811 /* pre increment */
8812 } else {
8813 /* post increment */
b0109805 8814 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8815 }
8816 } else {
8817 if (insn & (1 << 24)) {
8818 /* pre decrement */
8819 if (n != 1)
b0109805 8820 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8821 } else {
8822 /* post decrement */
b0109805 8823 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8824 }
8825 }
b0109805
PB
8826 store_reg(s, rn, addr);
8827 } else {
7d1b0095 8828 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8829 }
8830 if (loaded_base) {
b0109805 8831 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8832 }
8833 if ((insn & (1 << 22)) && !user) {
8834 /* Restore CPSR from SPSR. */
d9ba4830
PB
8835 tmp = load_cpu_field(spsr);
8836 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8837 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8838 s->is_jmp = DISAS_UPDATE;
8839 }
8840 }
8841 break;
8842 case 0xa:
8843 case 0xb:
8844 {
8845 int32_t offset;
8846
8847 /* branch (and link) */
8848 val = (int32_t)s->pc;
8849 if (insn & (1 << 24)) {
7d1b0095 8850 tmp = tcg_temp_new_i32();
5e3f878a
PB
8851 tcg_gen_movi_i32(tmp, val);
8852 store_reg(s, 14, tmp);
9ee6e8bb 8853 }
534df156
PM
8854 offset = sextract32(insn << 2, 0, 26);
8855 val += offset + 4;
9ee6e8bb
PB
8856 gen_jmp(s, val);
8857 }
8858 break;
8859 case 0xc:
8860 case 0xd:
8861 case 0xe:
6a57f3eb
WN
8862 if (((insn >> 8) & 0xe) == 10) {
8863 /* VFP. */
8864 if (disas_vfp_insn(env, s, insn)) {
8865 goto illegal_op;
8866 }
8867 } else if (disas_coproc_insn(env, s, insn)) {
8868 /* Coprocessor. */
9ee6e8bb 8869 goto illegal_op;
6a57f3eb 8870 }
9ee6e8bb
PB
8871 break;
8872 case 0xf:
8873 /* swi */
eaed129d 8874 gen_set_pc_im(s, s->pc);
d4a2dc67 8875 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
8876 s->is_jmp = DISAS_SWI;
8877 break;
8878 default:
8879 illegal_op:
d4a2dc67 8880 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
8881 break;
8882 }
8883 }
8884}
8885
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* The logical operations (and, bic, orr, orn, eor, ...) occupy
       opcode slots 0 through 7 of the Thumb-2 data-processing space.  */
    if (op < 8) {
        return 1;
    }
    return 0;
}
8892
8893/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8894 then set condition code flags based on the result of the operation.
8895 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8896 to the high bit of T1.
8897 Returns zero if the opcode is valid. */
8898
8899static int
39d5492a
PM
8900gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8901 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8902{
8903 int logic_cc;
8904
8905 logic_cc = 0;
8906 switch (op) {
8907 case 0: /* and */
396e467c 8908 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8909 logic_cc = conds;
8910 break;
8911 case 1: /* bic */
f669df27 8912 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8913 logic_cc = conds;
8914 break;
8915 case 2: /* orr */
396e467c 8916 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8917 logic_cc = conds;
8918 break;
8919 case 3: /* orn */
29501f1b 8920 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8921 logic_cc = conds;
8922 break;
8923 case 4: /* eor */
396e467c 8924 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8925 logic_cc = conds;
8926 break;
8927 case 8: /* add */
8928 if (conds)
72485ec4 8929 gen_add_CC(t0, t0, t1);
9ee6e8bb 8930 else
396e467c 8931 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8932 break;
8933 case 10: /* adc */
8934 if (conds)
49b4c31e 8935 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8936 else
396e467c 8937 gen_adc(t0, t1);
9ee6e8bb
PB
8938 break;
8939 case 11: /* sbc */
2de68a49
RH
8940 if (conds) {
8941 gen_sbc_CC(t0, t0, t1);
8942 } else {
396e467c 8943 gen_sub_carry(t0, t0, t1);
2de68a49 8944 }
9ee6e8bb
PB
8945 break;
8946 case 13: /* sub */
8947 if (conds)
72485ec4 8948 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8949 else
396e467c 8950 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8951 break;
8952 case 14: /* rsb */
8953 if (conds)
72485ec4 8954 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8955 else
396e467c 8956 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8957 break;
8958 default: /* 5, 6, 7, 9, 12, 15. */
8959 return 1;
8960 }
8961 if (logic_cc) {
396e467c 8962 gen_logic_CC(t0);
9ee6e8bb 8963 if (shifter_out)
396e467c 8964 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8965 }
8966 return 0;
8967}
8968
8969/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8970 is not legal. */
0ecb72a5 8971static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8972{
b0109805 8973 uint32_t insn, imm, shift, offset;
9ee6e8bb 8974 uint32_t rd, rn, rm, rs;
39d5492a
PM
8975 TCGv_i32 tmp;
8976 TCGv_i32 tmp2;
8977 TCGv_i32 tmp3;
8978 TCGv_i32 addr;
a7812ae4 8979 TCGv_i64 tmp64;
9ee6e8bb
PB
8980 int op;
8981 int shiftop;
8982 int conds;
8983 int logic_cc;
8984
8985 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8986 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8987 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8988 16-bit instructions to get correct prefetch abort behavior. */
8989 insn = insn_hw1;
8990 if ((insn & (1 << 12)) == 0) {
be5e7a76 8991 ARCH(5);
9ee6e8bb
PB
8992 /* Second half of blx. */
8993 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8994 tmp = load_reg(s, 14);
8995 tcg_gen_addi_i32(tmp, tmp, offset);
8996 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8997
7d1b0095 8998 tmp2 = tcg_temp_new_i32();
b0109805 8999 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9000 store_reg(s, 14, tmp2);
9001 gen_bx(s, tmp);
9ee6e8bb
PB
9002 return 0;
9003 }
9004 if (insn & (1 << 11)) {
9005 /* Second half of bl. */
9006 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9007 tmp = load_reg(s, 14);
6a0d8a1d 9008 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9009
7d1b0095 9010 tmp2 = tcg_temp_new_i32();
b0109805 9011 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9012 store_reg(s, 14, tmp2);
9013 gen_bx(s, tmp);
9ee6e8bb
PB
9014 return 0;
9015 }
9016 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9017 /* Instruction spans a page boundary. Implement it as two
9018 16-bit instructions in case the second half causes an
9019 prefetch abort. */
9020 offset = ((int32_t)insn << 21) >> 9;
396e467c 9021 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9022 return 0;
9023 }
9024 /* Fall through to 32-bit decode. */
9025 }
9026
d31dd73e 9027 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9028 s->pc += 2;
9029 insn |= (uint32_t)insn_hw1 << 16;
9030
9031 if ((insn & 0xf800e800) != 0xf000e800) {
9032 ARCH(6T2);
9033 }
9034
9035 rn = (insn >> 16) & 0xf;
9036 rs = (insn >> 12) & 0xf;
9037 rd = (insn >> 8) & 0xf;
9038 rm = insn & 0xf;
9039 switch ((insn >> 25) & 0xf) {
9040 case 0: case 1: case 2: case 3:
9041 /* 16-bit instructions. Should never happen. */
9042 abort();
9043 case 4:
9044 if (insn & (1 << 22)) {
9045 /* Other load/store, table branch. */
9046 if (insn & 0x01200000) {
9047 /* Load/store doubleword. */
9048 if (rn == 15) {
7d1b0095 9049 addr = tcg_temp_new_i32();
b0109805 9050 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9051 } else {
b0109805 9052 addr = load_reg(s, rn);
9ee6e8bb
PB
9053 }
9054 offset = (insn & 0xff) * 4;
9055 if ((insn & (1 << 23)) == 0)
9056 offset = -offset;
9057 if (insn & (1 << 24)) {
b0109805 9058 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9059 offset = 0;
9060 }
9061 if (insn & (1 << 20)) {
9062 /* ldrd */
e2592fad 9063 tmp = tcg_temp_new_i32();
6ce2faf4 9064 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9065 store_reg(s, rs, tmp);
9066 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9067 tmp = tcg_temp_new_i32();
6ce2faf4 9068 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9069 store_reg(s, rd, tmp);
9ee6e8bb
PB
9070 } else {
9071 /* strd */
b0109805 9072 tmp = load_reg(s, rs);
6ce2faf4 9073 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9074 tcg_temp_free_i32(tmp);
b0109805
PB
9075 tcg_gen_addi_i32(addr, addr, 4);
9076 tmp = load_reg(s, rd);
6ce2faf4 9077 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9078 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9079 }
9080 if (insn & (1 << 21)) {
9081 /* Base writeback. */
9082 if (rn == 15)
9083 goto illegal_op;
b0109805
PB
9084 tcg_gen_addi_i32(addr, addr, offset - 4);
9085 store_reg(s, rn, addr);
9086 } else {
7d1b0095 9087 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9088 }
9089 } else if ((insn & (1 << 23)) == 0) {
9090 /* Load/store exclusive word. */
39d5492a 9091 addr = tcg_temp_local_new_i32();
98a46317 9092 load_reg_var(s, addr, rn);
426f5abc 9093 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9094 if (insn & (1 << 20)) {
426f5abc 9095 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9096 } else {
426f5abc 9097 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9098 }
39d5492a 9099 tcg_temp_free_i32(addr);
2359bf80 9100 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9101 /* Table Branch. */
9102 if (rn == 15) {
7d1b0095 9103 addr = tcg_temp_new_i32();
b0109805 9104 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9105 } else {
b0109805 9106 addr = load_reg(s, rn);
9ee6e8bb 9107 }
b26eefb6 9108 tmp = load_reg(s, rm);
b0109805 9109 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9110 if (insn & (1 << 4)) {
9111 /* tbh */
b0109805 9112 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9113 tcg_temp_free_i32(tmp);
e2592fad 9114 tmp = tcg_temp_new_i32();
6ce2faf4 9115 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9116 } else { /* tbb */
7d1b0095 9117 tcg_temp_free_i32(tmp);
e2592fad 9118 tmp = tcg_temp_new_i32();
6ce2faf4 9119 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9120 }
7d1b0095 9121 tcg_temp_free_i32(addr);
b0109805
PB
9122 tcg_gen_shli_i32(tmp, tmp, 1);
9123 tcg_gen_addi_i32(tmp, tmp, s->pc);
9124 store_reg(s, 15, tmp);
9ee6e8bb 9125 } else {
2359bf80 9126 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9127 op = (insn >> 4) & 0x3;
2359bf80
MR
9128 switch (op2) {
9129 case 0:
426f5abc 9130 goto illegal_op;
2359bf80
MR
9131 case 1:
9132 /* Load/store exclusive byte/halfword/doubleword */
9133 if (op == 2) {
9134 goto illegal_op;
9135 }
9136 ARCH(7);
9137 break;
9138 case 2:
9139 /* Load-acquire/store-release */
9140 if (op == 3) {
9141 goto illegal_op;
9142 }
9143 /* Fall through */
9144 case 3:
9145 /* Load-acquire/store-release exclusive */
9146 ARCH(8);
9147 break;
426f5abc 9148 }
39d5492a 9149 addr = tcg_temp_local_new_i32();
98a46317 9150 load_reg_var(s, addr, rn);
2359bf80
MR
9151 if (!(op2 & 1)) {
9152 if (insn & (1 << 20)) {
9153 tmp = tcg_temp_new_i32();
9154 switch (op) {
9155 case 0: /* ldab */
6ce2faf4 9156 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9157 break;
9158 case 1: /* ldah */
6ce2faf4 9159 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9160 break;
9161 case 2: /* lda */
6ce2faf4 9162 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9163 break;
9164 default:
9165 abort();
9166 }
9167 store_reg(s, rs, tmp);
9168 } else {
9169 tmp = load_reg(s, rs);
9170 switch (op) {
9171 case 0: /* stlb */
6ce2faf4 9172 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9173 break;
9174 case 1: /* stlh */
6ce2faf4 9175 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9176 break;
9177 case 2: /* stl */
6ce2faf4 9178 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9179 break;
9180 default:
9181 abort();
9182 }
9183 tcg_temp_free_i32(tmp);
9184 }
9185 } else if (insn & (1 << 20)) {
426f5abc 9186 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9187 } else {
426f5abc 9188 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9189 }
39d5492a 9190 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9191 }
9192 } else {
9193 /* Load/store multiple, RFE, SRS. */
9194 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
9195 /* RFE, SRS: not available in user mode or on M profile */
9196 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 9197 goto illegal_op;
00115976 9198 }
9ee6e8bb
PB
9199 if (insn & (1 << 20)) {
9200 /* rfe */
b0109805
PB
9201 addr = load_reg(s, rn);
9202 if ((insn & (1 << 24)) == 0)
9203 tcg_gen_addi_i32(addr, addr, -8);
9204 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9205 tmp = tcg_temp_new_i32();
6ce2faf4 9206 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9207 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9208 tmp2 = tcg_temp_new_i32();
6ce2faf4 9209 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9210 if (insn & (1 << 21)) {
9211 /* Base writeback. */
b0109805
PB
9212 if (insn & (1 << 24)) {
9213 tcg_gen_addi_i32(addr, addr, 4);
9214 } else {
9215 tcg_gen_addi_i32(addr, addr, -4);
9216 }
9217 store_reg(s, rn, addr);
9218 } else {
7d1b0095 9219 tcg_temp_free_i32(addr);
9ee6e8bb 9220 }
b0109805 9221 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9222 } else {
9223 /* srs */
81465888
PM
9224 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9225 insn & (1 << 21));
9ee6e8bb
PB
9226 }
9227 } else {
5856d44e 9228 int i, loaded_base = 0;
39d5492a 9229 TCGv_i32 loaded_var;
9ee6e8bb 9230 /* Load/store multiple. */
b0109805 9231 addr = load_reg(s, rn);
9ee6e8bb
PB
9232 offset = 0;
9233 for (i = 0; i < 16; i++) {
9234 if (insn & (1 << i))
9235 offset += 4;
9236 }
9237 if (insn & (1 << 24)) {
b0109805 9238 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9239 }
9240
39d5492a 9241 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9242 for (i = 0; i < 16; i++) {
9243 if ((insn & (1 << i)) == 0)
9244 continue;
9245 if (insn & (1 << 20)) {
9246 /* Load. */
e2592fad 9247 tmp = tcg_temp_new_i32();
6ce2faf4 9248 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9249 if (i == 15) {
b0109805 9250 gen_bx(s, tmp);
5856d44e
YO
9251 } else if (i == rn) {
9252 loaded_var = tmp;
9253 loaded_base = 1;
9ee6e8bb 9254 } else {
b0109805 9255 store_reg(s, i, tmp);
9ee6e8bb
PB
9256 }
9257 } else {
9258 /* Store. */
b0109805 9259 tmp = load_reg(s, i);
6ce2faf4 9260 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9261 tcg_temp_free_i32(tmp);
9ee6e8bb 9262 }
b0109805 9263 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9264 }
5856d44e
YO
9265 if (loaded_base) {
9266 store_reg(s, rn, loaded_var);
9267 }
9ee6e8bb
PB
9268 if (insn & (1 << 21)) {
9269 /* Base register writeback. */
9270 if (insn & (1 << 24)) {
b0109805 9271 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9272 }
9273 /* Fault if writeback register is in register list. */
9274 if (insn & (1 << rn))
9275 goto illegal_op;
b0109805
PB
9276 store_reg(s, rn, addr);
9277 } else {
7d1b0095 9278 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9279 }
9280 }
9281 }
9282 break;
2af9ab77
JB
9283 case 5:
9284
9ee6e8bb 9285 op = (insn >> 21) & 0xf;
2af9ab77
JB
9286 if (op == 6) {
9287 /* Halfword pack. */
9288 tmp = load_reg(s, rn);
9289 tmp2 = load_reg(s, rm);
9290 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9291 if (insn & (1 << 5)) {
9292 /* pkhtb */
9293 if (shift == 0)
9294 shift = 31;
9295 tcg_gen_sari_i32(tmp2, tmp2, shift);
9296 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9297 tcg_gen_ext16u_i32(tmp2, tmp2);
9298 } else {
9299 /* pkhbt */
9300 if (shift)
9301 tcg_gen_shli_i32(tmp2, tmp2, shift);
9302 tcg_gen_ext16u_i32(tmp, tmp);
9303 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9304 }
9305 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9306 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9307 store_reg(s, rd, tmp);
9308 } else {
2af9ab77
JB
9309 /* Data processing register constant shift. */
9310 if (rn == 15) {
7d1b0095 9311 tmp = tcg_temp_new_i32();
2af9ab77
JB
9312 tcg_gen_movi_i32(tmp, 0);
9313 } else {
9314 tmp = load_reg(s, rn);
9315 }
9316 tmp2 = load_reg(s, rm);
9317
9318 shiftop = (insn >> 4) & 3;
9319 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9320 conds = (insn & (1 << 20)) != 0;
9321 logic_cc = (conds && thumb2_logic_op(op));
9322 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9323 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9324 goto illegal_op;
7d1b0095 9325 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9326 if (rd != 15) {
9327 store_reg(s, rd, tmp);
9328 } else {
7d1b0095 9329 tcg_temp_free_i32(tmp);
2af9ab77 9330 }
3174f8e9 9331 }
9ee6e8bb
PB
9332 break;
9333 case 13: /* Misc data processing. */
9334 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9335 if (op < 4 && (insn & 0xf000) != 0xf000)
9336 goto illegal_op;
9337 switch (op) {
9338 case 0: /* Register controlled shift. */
8984bd2e
PB
9339 tmp = load_reg(s, rn);
9340 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9341 if ((insn & 0x70) != 0)
9342 goto illegal_op;
9343 op = (insn >> 21) & 3;
8984bd2e
PB
9344 logic_cc = (insn & (1 << 20)) != 0;
9345 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9346 if (logic_cc)
9347 gen_logic_CC(tmp);
21aeb343 9348 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9349 break;
9350 case 1: /* Sign/zero extend. */
5e3f878a 9351 tmp = load_reg(s, rm);
9ee6e8bb 9352 shift = (insn >> 4) & 3;
1301f322 9353 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9354 rotate, a shift is sufficient. */
9355 if (shift != 0)
f669df27 9356 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9357 op = (insn >> 20) & 7;
9358 switch (op) {
5e3f878a
PB
9359 case 0: gen_sxth(tmp); break;
9360 case 1: gen_uxth(tmp); break;
9361 case 2: gen_sxtb16(tmp); break;
9362 case 3: gen_uxtb16(tmp); break;
9363 case 4: gen_sxtb(tmp); break;
9364 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9365 default: goto illegal_op;
9366 }
9367 if (rn != 15) {
5e3f878a 9368 tmp2 = load_reg(s, rn);
9ee6e8bb 9369 if ((op >> 1) == 1) {
5e3f878a 9370 gen_add16(tmp, tmp2);
9ee6e8bb 9371 } else {
5e3f878a 9372 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9373 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9374 }
9375 }
5e3f878a 9376 store_reg(s, rd, tmp);
9ee6e8bb
PB
9377 break;
9378 case 2: /* SIMD add/subtract. */
9379 op = (insn >> 20) & 7;
9380 shift = (insn >> 4) & 7;
9381 if ((op & 3) == 3 || (shift & 3) == 3)
9382 goto illegal_op;
6ddbc6e4
PB
9383 tmp = load_reg(s, rn);
9384 tmp2 = load_reg(s, rm);
9385 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9386 tcg_temp_free_i32(tmp2);
6ddbc6e4 9387 store_reg(s, rd, tmp);
9ee6e8bb
PB
9388 break;
9389 case 3: /* Other data processing. */
9390 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9391 if (op < 4) {
9392 /* Saturating add/subtract. */
d9ba4830
PB
9393 tmp = load_reg(s, rn);
9394 tmp2 = load_reg(s, rm);
9ee6e8bb 9395 if (op & 1)
9ef39277 9396 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9397 if (op & 2)
9ef39277 9398 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9399 else
9ef39277 9400 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9401 tcg_temp_free_i32(tmp2);
9ee6e8bb 9402 } else {
d9ba4830 9403 tmp = load_reg(s, rn);
9ee6e8bb
PB
9404 switch (op) {
9405 case 0x0a: /* rbit */
d9ba4830 9406 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9407 break;
9408 case 0x08: /* rev */
66896cb8 9409 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9410 break;
9411 case 0x09: /* rev16 */
d9ba4830 9412 gen_rev16(tmp);
9ee6e8bb
PB
9413 break;
9414 case 0x0b: /* revsh */
d9ba4830 9415 gen_revsh(tmp);
9ee6e8bb
PB
9416 break;
9417 case 0x10: /* sel */
d9ba4830 9418 tmp2 = load_reg(s, rm);
7d1b0095 9419 tmp3 = tcg_temp_new_i32();
0ecb72a5 9420 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9421 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9422 tcg_temp_free_i32(tmp3);
9423 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9424 break;
9425 case 0x18: /* clz */
d9ba4830 9426 gen_helper_clz(tmp, tmp);
9ee6e8bb 9427 break;
eb0ecd5a
WN
9428 case 0x20:
9429 case 0x21:
9430 case 0x22:
9431 case 0x28:
9432 case 0x29:
9433 case 0x2a:
9434 {
9435 /* crc32/crc32c */
9436 uint32_t sz = op & 0x3;
9437 uint32_t c = op & 0x8;
9438
9439 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9440 goto illegal_op;
9441 }
9442
9443 tmp2 = load_reg(s, rm);
aa633469
PM
9444 if (sz == 0) {
9445 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9446 } else if (sz == 1) {
9447 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9448 }
eb0ecd5a
WN
9449 tmp3 = tcg_const_i32(1 << sz);
9450 if (c) {
9451 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9452 } else {
9453 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9454 }
9455 tcg_temp_free_i32(tmp2);
9456 tcg_temp_free_i32(tmp3);
9457 break;
9458 }
9ee6e8bb
PB
9459 default:
9460 goto illegal_op;
9461 }
9462 }
d9ba4830 9463 store_reg(s, rd, tmp);
9ee6e8bb
PB
9464 break;
9465 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9466 op = (insn >> 4) & 0xf;
d9ba4830
PB
9467 tmp = load_reg(s, rn);
9468 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9469 switch ((insn >> 20) & 7) {
9470 case 0: /* 32 x 32 -> 32 */
d9ba4830 9471 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9472 tcg_temp_free_i32(tmp2);
9ee6e8bb 9473 if (rs != 15) {
d9ba4830 9474 tmp2 = load_reg(s, rs);
9ee6e8bb 9475 if (op)
d9ba4830 9476 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9477 else
d9ba4830 9478 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9479 tcg_temp_free_i32(tmp2);
9ee6e8bb 9480 }
9ee6e8bb
PB
9481 break;
9482 case 1: /* 16 x 16 -> 32 */
d9ba4830 9483 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9484 tcg_temp_free_i32(tmp2);
9ee6e8bb 9485 if (rs != 15) {
d9ba4830 9486 tmp2 = load_reg(s, rs);
9ef39277 9487 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9488 tcg_temp_free_i32(tmp2);
9ee6e8bb 9489 }
9ee6e8bb
PB
9490 break;
9491 case 2: /* Dual multiply add. */
9492 case 4: /* Dual multiply subtract. */
9493 if (op)
d9ba4830
PB
9494 gen_swap_half(tmp2);
9495 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9496 if (insn & (1 << 22)) {
e1d177b9 9497 /* This subtraction cannot overflow. */
d9ba4830 9498 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9499 } else {
e1d177b9
PM
9500 /* This addition cannot overflow 32 bits;
9501 * however it may overflow considered as a signed
9502 * operation, in which case we must set the Q flag.
9503 */
9ef39277 9504 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9505 }
7d1b0095 9506 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9507 if (rs != 15)
9508 {
d9ba4830 9509 tmp2 = load_reg(s, rs);
9ef39277 9510 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9511 tcg_temp_free_i32(tmp2);
9ee6e8bb 9512 }
9ee6e8bb
PB
9513 break;
9514 case 3: /* 32 * 16 -> 32msb */
9515 if (op)
d9ba4830 9516 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9517 else
d9ba4830 9518 gen_sxth(tmp2);
a7812ae4
PB
9519 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9520 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9521 tmp = tcg_temp_new_i32();
a7812ae4 9522 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9523 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9524 if (rs != 15)
9525 {
d9ba4830 9526 tmp2 = load_reg(s, rs);
9ef39277 9527 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9528 tcg_temp_free_i32(tmp2);
9ee6e8bb 9529 }
9ee6e8bb 9530 break;
838fa72d
AJ
9531 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9532 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9533 if (rs != 15) {
838fa72d
AJ
9534 tmp = load_reg(s, rs);
9535 if (insn & (1 << 20)) {
9536 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9537 } else {
838fa72d 9538 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9539 }
2c0262af 9540 }
838fa72d
AJ
9541 if (insn & (1 << 4)) {
9542 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9543 }
9544 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9545 tmp = tcg_temp_new_i32();
838fa72d
AJ
9546 tcg_gen_trunc_i64_i32(tmp, tmp64);
9547 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9548 break;
9549 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9550 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9551 tcg_temp_free_i32(tmp2);
9ee6e8bb 9552 if (rs != 15) {
d9ba4830
PB
9553 tmp2 = load_reg(s, rs);
9554 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9555 tcg_temp_free_i32(tmp2);
5fd46862 9556 }
9ee6e8bb 9557 break;
2c0262af 9558 }
d9ba4830 9559 store_reg(s, rd, tmp);
2c0262af 9560 break;
9ee6e8bb
PB
9561 case 6: case 7: /* 64-bit multiply, Divide. */
9562 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9563 tmp = load_reg(s, rn);
9564 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9565 if ((op & 0x50) == 0x10) {
9566 /* sdiv, udiv */
47789990 9567 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9568 goto illegal_op;
47789990 9569 }
9ee6e8bb 9570 if (op & 0x20)
5e3f878a 9571 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9572 else
5e3f878a 9573 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9574 tcg_temp_free_i32(tmp2);
5e3f878a 9575 store_reg(s, rd, tmp);
9ee6e8bb
PB
9576 } else if ((op & 0xe) == 0xc) {
9577 /* Dual multiply accumulate long. */
9578 if (op & 1)
5e3f878a
PB
9579 gen_swap_half(tmp2);
9580 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9581 if (op & 0x10) {
5e3f878a 9582 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9583 } else {
5e3f878a 9584 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9585 }
7d1b0095 9586 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9587 /* BUGFIX */
9588 tmp64 = tcg_temp_new_i64();
9589 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9590 tcg_temp_free_i32(tmp);
a7812ae4
PB
9591 gen_addq(s, tmp64, rs, rd);
9592 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9593 tcg_temp_free_i64(tmp64);
2c0262af 9594 } else {
9ee6e8bb
PB
9595 if (op & 0x20) {
9596 /* Unsigned 64-bit multiply */
a7812ae4 9597 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9598 } else {
9ee6e8bb
PB
9599 if (op & 8) {
9600 /* smlalxy */
5e3f878a 9601 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9602 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9603 tmp64 = tcg_temp_new_i64();
9604 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9605 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9606 } else {
9607 /* Signed 64-bit multiply */
a7812ae4 9608 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9609 }
b5ff1b31 9610 }
9ee6e8bb
PB
9611 if (op & 4) {
9612 /* umaal */
a7812ae4
PB
9613 gen_addq_lo(s, tmp64, rs);
9614 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9615 } else if (op & 0x40) {
9616 /* 64-bit accumulate. */
a7812ae4 9617 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9618 }
a7812ae4 9619 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9620 tcg_temp_free_i64(tmp64);
5fd46862 9621 }
2c0262af 9622 break;
9ee6e8bb
PB
9623 }
9624 break;
9625 case 6: case 7: case 14: case 15:
9626 /* Coprocessor. */
9627 if (((insn >> 24) & 3) == 3) {
9628 /* Translate into the equivalent ARM encoding. */
f06053e3 9629 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9630 if (disas_neon_data_insn(env, s, insn))
9631 goto illegal_op;
6a57f3eb
WN
9632 } else if (((insn >> 8) & 0xe) == 10) {
9633 if (disas_vfp_insn(env, s, insn)) {
9634 goto illegal_op;
9635 }
9ee6e8bb
PB
9636 } else {
9637 if (insn & (1 << 28))
9638 goto illegal_op;
9639 if (disas_coproc_insn (env, s, insn))
9640 goto illegal_op;
9641 }
9642 break;
9643 case 8: case 9: case 10: case 11:
9644 if (insn & (1 << 15)) {
9645 /* Branches, misc control. */
9646 if (insn & 0x5000) {
9647 /* Unconditional branch. */
9648 /* signextend(hw1[10:0]) -> offset[:12]. */
9649 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9650 /* hw1[10:0] -> offset[11:1]. */
9651 offset |= (insn & 0x7ff) << 1;
9652 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9653 offset[24:22] already have the same value because of the
9654 sign extension above. */
9655 offset ^= ((~insn) & (1 << 13)) << 10;
9656 offset ^= ((~insn) & (1 << 11)) << 11;
9657
9ee6e8bb
PB
9658 if (insn & (1 << 14)) {
9659 /* Branch and link. */
3174f8e9 9660 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9661 }
3b46e624 9662
b0109805 9663 offset += s->pc;
9ee6e8bb
PB
9664 if (insn & (1 << 12)) {
9665 /* b/bl */
b0109805 9666 gen_jmp(s, offset);
9ee6e8bb
PB
9667 } else {
9668 /* blx */
b0109805 9669 offset &= ~(uint32_t)2;
be5e7a76 9670 /* thumb2 bx, no need to check */
b0109805 9671 gen_bx_im(s, offset);
2c0262af 9672 }
9ee6e8bb
PB
9673 } else if (((insn >> 23) & 7) == 7) {
9674 /* Misc control */
9675 if (insn & (1 << 13))
9676 goto illegal_op;
9677
9678 if (insn & (1 << 26)) {
9679 /* Secure monitor call (v6Z) */
e0c270d9
SW
9680 qemu_log_mask(LOG_UNIMP,
9681 "arm: unimplemented secure monitor call\n");
9ee6e8bb 9682 goto illegal_op; /* not implemented. */
2c0262af 9683 } else {
9ee6e8bb
PB
9684 op = (insn >> 20) & 7;
9685 switch (op) {
9686 case 0: /* msr cpsr. */
9687 if (IS_M(env)) {
8984bd2e
PB
9688 tmp = load_reg(s, rn);
9689 addr = tcg_const_i32(insn & 0xff);
9690 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9691 tcg_temp_free_i32(addr);
7d1b0095 9692 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9693 gen_lookup_tb(s);
9694 break;
9695 }
9696 /* fall through */
9697 case 1: /* msr spsr. */
9698 if (IS_M(env))
9699 goto illegal_op;
2fbac54b
FN
9700 tmp = load_reg(s, rn);
9701 if (gen_set_psr(s,
9ee6e8bb 9702 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9703 op == 1, tmp))
9ee6e8bb
PB
9704 goto illegal_op;
9705 break;
9706 case 2: /* cps, nop-hint. */
9707 if (((insn >> 8) & 7) == 0) {
9708 gen_nop_hint(s, insn & 0xff);
9709 }
9710 /* Implemented as NOP in user mode. */
9711 if (IS_USER(s))
9712 break;
9713 offset = 0;
9714 imm = 0;
9715 if (insn & (1 << 10)) {
9716 if (insn & (1 << 7))
9717 offset |= CPSR_A;
9718 if (insn & (1 << 6))
9719 offset |= CPSR_I;
9720 if (insn & (1 << 5))
9721 offset |= CPSR_F;
9722 if (insn & (1 << 9))
9723 imm = CPSR_A | CPSR_I | CPSR_F;
9724 }
9725 if (insn & (1 << 8)) {
9726 offset |= 0x1f;
9727 imm |= (insn & 0x1f);
9728 }
9729 if (offset) {
2fbac54b 9730 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9731 }
9732 break;
9733 case 3: /* Special control operations. */
426f5abc 9734 ARCH(7);
9ee6e8bb
PB
9735 op = (insn >> 4) & 0xf;
9736 switch (op) {
9737 case 2: /* clrex */
426f5abc 9738 gen_clrex(s);
9ee6e8bb
PB
9739 break;
9740 case 4: /* dsb */
9741 case 5: /* dmb */
9742 case 6: /* isb */
9743 /* These execute as NOPs. */
9ee6e8bb
PB
9744 break;
9745 default:
9746 goto illegal_op;
9747 }
9748 break;
9749 case 4: /* bxj */
9750 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9751 tmp = load_reg(s, rn);
9752 gen_bx(s, tmp);
9ee6e8bb
PB
9753 break;
9754 case 5: /* Exception return. */
b8b45b68
RV
9755 if (IS_USER(s)) {
9756 goto illegal_op;
9757 }
9758 if (rn != 14 || rd != 15) {
9759 goto illegal_op;
9760 }
9761 tmp = load_reg(s, rn);
9762 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9763 gen_exception_return(s, tmp);
9764 break;
9ee6e8bb 9765 case 6: /* mrs cpsr. */
7d1b0095 9766 tmp = tcg_temp_new_i32();
9ee6e8bb 9767 if (IS_M(env)) {
8984bd2e
PB
9768 addr = tcg_const_i32(insn & 0xff);
9769 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9770 tcg_temp_free_i32(addr);
9ee6e8bb 9771 } else {
9ef39277 9772 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9773 }
8984bd2e 9774 store_reg(s, rd, tmp);
9ee6e8bb
PB
9775 break;
9776 case 7: /* mrs spsr. */
9777 /* Not accessible in user mode. */
9778 if (IS_USER(s) || IS_M(env))
9779 goto illegal_op;
d9ba4830
PB
9780 tmp = load_cpu_field(spsr);
9781 store_reg(s, rd, tmp);
9ee6e8bb 9782 break;
2c0262af
FB
9783 }
9784 }
9ee6e8bb
PB
9785 } else {
9786 /* Conditional branch. */
9787 op = (insn >> 22) & 0xf;
9788 /* Generate a conditional jump to next instruction. */
9789 s->condlabel = gen_new_label();
39fb730a 9790 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9791 s->condjmp = 1;
9792
9793 /* offset[11:1] = insn[10:0] */
9794 offset = (insn & 0x7ff) << 1;
9795 /* offset[17:12] = insn[21:16]. */
9796 offset |= (insn & 0x003f0000) >> 4;
9797 /* offset[31:20] = insn[26]. */
9798 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9799 /* offset[18] = insn[13]. */
9800 offset |= (insn & (1 << 13)) << 5;
9801 /* offset[19] = insn[11]. */
9802 offset |= (insn & (1 << 11)) << 8;
9803
9804 /* jump to the offset */
b0109805 9805 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9806 }
9807 } else {
9808 /* Data processing immediate. */
9809 if (insn & (1 << 25)) {
9810 if (insn & (1 << 24)) {
9811 if (insn & (1 << 20))
9812 goto illegal_op;
9813 /* Bitfield/Saturate. */
9814 op = (insn >> 21) & 7;
9815 imm = insn & 0x1f;
9816 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9817 if (rn == 15) {
7d1b0095 9818 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9819 tcg_gen_movi_i32(tmp, 0);
9820 } else {
9821 tmp = load_reg(s, rn);
9822 }
9ee6e8bb
PB
9823 switch (op) {
9824 case 2: /* Signed bitfield extract. */
9825 imm++;
9826 if (shift + imm > 32)
9827 goto illegal_op;
9828 if (imm < 32)
6ddbc6e4 9829 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9830 break;
9831 case 6: /* Unsigned bitfield extract. */
9832 imm++;
9833 if (shift + imm > 32)
9834 goto illegal_op;
9835 if (imm < 32)
6ddbc6e4 9836 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9837 break;
9838 case 3: /* Bitfield insert/clear. */
9839 if (imm < shift)
9840 goto illegal_op;
9841 imm = imm + 1 - shift;
9842 if (imm != 32) {
6ddbc6e4 9843 tmp2 = load_reg(s, rd);
d593c48e 9844 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9845 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9846 }
9847 break;
9848 case 7:
9849 goto illegal_op;
9850 default: /* Saturate. */
9ee6e8bb
PB
9851 if (shift) {
9852 if (op & 1)
6ddbc6e4 9853 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9854 else
6ddbc6e4 9855 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9856 }
6ddbc6e4 9857 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9858 if (op & 4) {
9859 /* Unsigned. */
9ee6e8bb 9860 if ((op & 1) && shift == 0)
9ef39277 9861 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9862 else
9ef39277 9863 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9864 } else {
9ee6e8bb 9865 /* Signed. */
9ee6e8bb 9866 if ((op & 1) && shift == 0)
9ef39277 9867 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9868 else
9ef39277 9869 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9870 }
b75263d6 9871 tcg_temp_free_i32(tmp2);
9ee6e8bb 9872 break;
2c0262af 9873 }
6ddbc6e4 9874 store_reg(s, rd, tmp);
9ee6e8bb
PB
9875 } else {
9876 imm = ((insn & 0x04000000) >> 15)
9877 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9878 if (insn & (1 << 22)) {
9879 /* 16-bit immediate. */
9880 imm |= (insn >> 4) & 0xf000;
9881 if (insn & (1 << 23)) {
9882 /* movt */
5e3f878a 9883 tmp = load_reg(s, rd);
86831435 9884 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9885 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9886 } else {
9ee6e8bb 9887 /* movw */
7d1b0095 9888 tmp = tcg_temp_new_i32();
5e3f878a 9889 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9890 }
9891 } else {
9ee6e8bb
PB
9892 /* Add/sub 12-bit immediate. */
9893 if (rn == 15) {
b0109805 9894 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9895 if (insn & (1 << 23))
b0109805 9896 offset -= imm;
9ee6e8bb 9897 else
b0109805 9898 offset += imm;
7d1b0095 9899 tmp = tcg_temp_new_i32();
5e3f878a 9900 tcg_gen_movi_i32(tmp, offset);
2c0262af 9901 } else {
5e3f878a 9902 tmp = load_reg(s, rn);
9ee6e8bb 9903 if (insn & (1 << 23))
5e3f878a 9904 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9905 else
5e3f878a 9906 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9907 }
9ee6e8bb 9908 }
5e3f878a 9909 store_reg(s, rd, tmp);
191abaa2 9910 }
9ee6e8bb
PB
9911 } else {
9912 int shifter_out = 0;
9913 /* modified 12-bit immediate. */
9914 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9915 imm = (insn & 0xff);
9916 switch (shift) {
9917 case 0: /* XY */
9918 /* Nothing to do. */
9919 break;
9920 case 1: /* 00XY00XY */
9921 imm |= imm << 16;
9922 break;
9923 case 2: /* XY00XY00 */
9924 imm |= imm << 16;
9925 imm <<= 8;
9926 break;
9927 case 3: /* XYXYXYXY */
9928 imm |= imm << 16;
9929 imm |= imm << 8;
9930 break;
9931 default: /* Rotated constant. */
9932 shift = (shift << 1) | (imm >> 7);
9933 imm |= 0x80;
9934 imm = imm << (32 - shift);
9935 shifter_out = 1;
9936 break;
b5ff1b31 9937 }
7d1b0095 9938 tmp2 = tcg_temp_new_i32();
3174f8e9 9939 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9940 rn = (insn >> 16) & 0xf;
3174f8e9 9941 if (rn == 15) {
7d1b0095 9942 tmp = tcg_temp_new_i32();
3174f8e9
FN
9943 tcg_gen_movi_i32(tmp, 0);
9944 } else {
9945 tmp = load_reg(s, rn);
9946 }
9ee6e8bb
PB
9947 op = (insn >> 21) & 0xf;
9948 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9949 shifter_out, tmp, tmp2))
9ee6e8bb 9950 goto illegal_op;
7d1b0095 9951 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9952 rd = (insn >> 8) & 0xf;
9953 if (rd != 15) {
3174f8e9
FN
9954 store_reg(s, rd, tmp);
9955 } else {
7d1b0095 9956 tcg_temp_free_i32(tmp);
2c0262af 9957 }
2c0262af 9958 }
9ee6e8bb
PB
9959 }
9960 break;
9961 case 12: /* Load/store single data item. */
9962 {
9963 int postinc = 0;
9964 int writeback = 0;
a99caa48 9965 int memidx;
9ee6e8bb
PB
9966 if ((insn & 0x01100000) == 0x01000000) {
9967 if (disas_neon_ls_insn(env, s, insn))
c1713132 9968 goto illegal_op;
9ee6e8bb
PB
9969 break;
9970 }
a2fdc890
PM
9971 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9972 if (rs == 15) {
9973 if (!(insn & (1 << 20))) {
9974 goto illegal_op;
9975 }
9976 if (op != 2) {
9977 /* Byte or halfword load space with dest == r15 : memory hints.
9978 * Catch them early so we don't emit pointless addressing code.
9979 * This space is a mix of:
9980 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9981 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9982 * cores)
9983 * unallocated hints, which must be treated as NOPs
9984 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9985 * which is easiest for the decoding logic
9986 * Some space which must UNDEF
9987 */
9988 int op1 = (insn >> 23) & 3;
9989 int op2 = (insn >> 6) & 0x3f;
9990 if (op & 2) {
9991 goto illegal_op;
9992 }
9993 if (rn == 15) {
02afbf64
PM
9994 /* UNPREDICTABLE, unallocated hint or
9995 * PLD/PLDW/PLI (literal)
9996 */
a2fdc890
PM
9997 return 0;
9998 }
9999 if (op1 & 1) {
02afbf64 10000 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10001 }
10002 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10003 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10004 }
10005 /* UNDEF space, or an UNPREDICTABLE */
10006 return 1;
10007 }
10008 }
a99caa48 10009 memidx = get_mem_index(s);
9ee6e8bb 10010 if (rn == 15) {
7d1b0095 10011 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10012 /* PC relative. */
10013 /* s->pc has already been incremented by 4. */
10014 imm = s->pc & 0xfffffffc;
10015 if (insn & (1 << 23))
10016 imm += insn & 0xfff;
10017 else
10018 imm -= insn & 0xfff;
b0109805 10019 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10020 } else {
b0109805 10021 addr = load_reg(s, rn);
9ee6e8bb
PB
10022 if (insn & (1 << 23)) {
10023 /* Positive offset. */
10024 imm = insn & 0xfff;
b0109805 10025 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10026 } else {
9ee6e8bb 10027 imm = insn & 0xff;
2a0308c5
PM
10028 switch ((insn >> 8) & 0xf) {
10029 case 0x0: /* Shifted Register. */
9ee6e8bb 10030 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10031 if (shift > 3) {
10032 tcg_temp_free_i32(addr);
18c9b560 10033 goto illegal_op;
2a0308c5 10034 }
b26eefb6 10035 tmp = load_reg(s, rm);
9ee6e8bb 10036 if (shift)
b26eefb6 10037 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10038 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10039 tcg_temp_free_i32(tmp);
9ee6e8bb 10040 break;
2a0308c5 10041 case 0xc: /* Negative offset. */
b0109805 10042 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10043 break;
2a0308c5 10044 case 0xe: /* User privilege. */
b0109805 10045 tcg_gen_addi_i32(addr, addr, imm);
a99caa48 10046 memidx = MMU_USER_IDX;
9ee6e8bb 10047 break;
2a0308c5 10048 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10049 imm = -imm;
10050 /* Fall through. */
2a0308c5 10051 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10052 postinc = 1;
10053 writeback = 1;
10054 break;
2a0308c5 10055 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10056 imm = -imm;
10057 /* Fall through. */
2a0308c5 10058 case 0xf: /* Pre-increment. */
b0109805 10059 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10060 writeback = 1;
10061 break;
10062 default:
2a0308c5 10063 tcg_temp_free_i32(addr);
b7bcbe95 10064 goto illegal_op;
9ee6e8bb
PB
10065 }
10066 }
10067 }
9ee6e8bb
PB
10068 if (insn & (1 << 20)) {
10069 /* Load. */
5a839c0d 10070 tmp = tcg_temp_new_i32();
a2fdc890 10071 switch (op) {
5a839c0d 10072 case 0:
a99caa48 10073 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10074 break;
10075 case 4:
a99caa48 10076 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10077 break;
10078 case 1:
a99caa48 10079 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10080 break;
10081 case 5:
a99caa48 10082 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10083 break;
10084 case 2:
a99caa48 10085 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10086 break;
2a0308c5 10087 default:
5a839c0d 10088 tcg_temp_free_i32(tmp);
2a0308c5
PM
10089 tcg_temp_free_i32(addr);
10090 goto illegal_op;
a2fdc890
PM
10091 }
10092 if (rs == 15) {
10093 gen_bx(s, tmp);
9ee6e8bb 10094 } else {
a2fdc890 10095 store_reg(s, rs, tmp);
9ee6e8bb
PB
10096 }
10097 } else {
10098 /* Store. */
b0109805 10099 tmp = load_reg(s, rs);
9ee6e8bb 10100 switch (op) {
5a839c0d 10101 case 0:
a99caa48 10102 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10103 break;
10104 case 1:
a99caa48 10105 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10106 break;
10107 case 2:
a99caa48 10108 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10109 break;
2a0308c5 10110 default:
5a839c0d 10111 tcg_temp_free_i32(tmp);
2a0308c5
PM
10112 tcg_temp_free_i32(addr);
10113 goto illegal_op;
b7bcbe95 10114 }
5a839c0d 10115 tcg_temp_free_i32(tmp);
2c0262af 10116 }
9ee6e8bb 10117 if (postinc)
b0109805
PB
10118 tcg_gen_addi_i32(addr, addr, imm);
10119 if (writeback) {
10120 store_reg(s, rn, addr);
10121 } else {
7d1b0095 10122 tcg_temp_free_i32(addr);
b0109805 10123 }
9ee6e8bb
PB
10124 }
10125 break;
10126 default:
10127 goto illegal_op;
2c0262af 10128 }
9ee6e8bb
PB
10129 return 0;
10130illegal_op:
10131 return 1;
2c0262af
FB
10132}
10133
0ecb72a5 10134static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10135{
10136 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10137 int32_t offset;
10138 int i;
39d5492a
PM
10139 TCGv_i32 tmp;
10140 TCGv_i32 tmp2;
10141 TCGv_i32 addr;
99c475ab 10142
9ee6e8bb
PB
10143 if (s->condexec_mask) {
10144 cond = s->condexec_cond;
bedd2912
JB
10145 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10146 s->condlabel = gen_new_label();
39fb730a 10147 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10148 s->condjmp = 1;
10149 }
9ee6e8bb
PB
10150 }
10151
d31dd73e 10152 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10153 s->pc += 2;
b5ff1b31 10154
99c475ab
FB
10155 switch (insn >> 12) {
10156 case 0: case 1:
396e467c 10157
99c475ab
FB
10158 rd = insn & 7;
10159 op = (insn >> 11) & 3;
10160 if (op == 3) {
10161 /* add/subtract */
10162 rn = (insn >> 3) & 7;
396e467c 10163 tmp = load_reg(s, rn);
99c475ab
FB
10164 if (insn & (1 << 10)) {
10165 /* immediate */
7d1b0095 10166 tmp2 = tcg_temp_new_i32();
396e467c 10167 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10168 } else {
10169 /* reg */
10170 rm = (insn >> 6) & 7;
396e467c 10171 tmp2 = load_reg(s, rm);
99c475ab 10172 }
9ee6e8bb
PB
10173 if (insn & (1 << 9)) {
10174 if (s->condexec_mask)
396e467c 10175 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10176 else
72485ec4 10177 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10178 } else {
10179 if (s->condexec_mask)
396e467c 10180 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10181 else
72485ec4 10182 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10183 }
7d1b0095 10184 tcg_temp_free_i32(tmp2);
396e467c 10185 store_reg(s, rd, tmp);
99c475ab
FB
10186 } else {
10187 /* shift immediate */
10188 rm = (insn >> 3) & 7;
10189 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10190 tmp = load_reg(s, rm);
10191 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10192 if (!s->condexec_mask)
10193 gen_logic_CC(tmp);
10194 store_reg(s, rd, tmp);
99c475ab
FB
10195 }
10196 break;
10197 case 2: case 3:
10198 /* arithmetic large immediate */
10199 op = (insn >> 11) & 3;
10200 rd = (insn >> 8) & 0x7;
396e467c 10201 if (op == 0) { /* mov */
7d1b0095 10202 tmp = tcg_temp_new_i32();
396e467c 10203 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10204 if (!s->condexec_mask)
396e467c
FN
10205 gen_logic_CC(tmp);
10206 store_reg(s, rd, tmp);
10207 } else {
10208 tmp = load_reg(s, rd);
7d1b0095 10209 tmp2 = tcg_temp_new_i32();
396e467c
FN
10210 tcg_gen_movi_i32(tmp2, insn & 0xff);
10211 switch (op) {
10212 case 1: /* cmp */
72485ec4 10213 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10214 tcg_temp_free_i32(tmp);
10215 tcg_temp_free_i32(tmp2);
396e467c
FN
10216 break;
10217 case 2: /* add */
10218 if (s->condexec_mask)
10219 tcg_gen_add_i32(tmp, tmp, tmp2);
10220 else
72485ec4 10221 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10222 tcg_temp_free_i32(tmp2);
396e467c
FN
10223 store_reg(s, rd, tmp);
10224 break;
10225 case 3: /* sub */
10226 if (s->condexec_mask)
10227 tcg_gen_sub_i32(tmp, tmp, tmp2);
10228 else
72485ec4 10229 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10230 tcg_temp_free_i32(tmp2);
396e467c
FN
10231 store_reg(s, rd, tmp);
10232 break;
10233 }
99c475ab 10234 }
99c475ab
FB
10235 break;
10236 case 4:
10237 if (insn & (1 << 11)) {
10238 rd = (insn >> 8) & 7;
5899f386
FB
10239 /* load pc-relative. Bit 1 of PC is ignored. */
10240 val = s->pc + 2 + ((insn & 0xff) * 4);
10241 val &= ~(uint32_t)2;
7d1b0095 10242 addr = tcg_temp_new_i32();
b0109805 10243 tcg_gen_movi_i32(addr, val);
c40c8556 10244 tmp = tcg_temp_new_i32();
6ce2faf4 10245 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10246 tcg_temp_free_i32(addr);
b0109805 10247 store_reg(s, rd, tmp);
99c475ab
FB
10248 break;
10249 }
10250 if (insn & (1 << 10)) {
10251 /* data processing extended or blx */
10252 rd = (insn & 7) | ((insn >> 4) & 8);
10253 rm = (insn >> 3) & 0xf;
10254 op = (insn >> 8) & 3;
10255 switch (op) {
10256 case 0: /* add */
396e467c
FN
10257 tmp = load_reg(s, rd);
10258 tmp2 = load_reg(s, rm);
10259 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10260 tcg_temp_free_i32(tmp2);
396e467c 10261 store_reg(s, rd, tmp);
99c475ab
FB
10262 break;
10263 case 1: /* cmp */
396e467c
FN
10264 tmp = load_reg(s, rd);
10265 tmp2 = load_reg(s, rm);
72485ec4 10266 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10267 tcg_temp_free_i32(tmp2);
10268 tcg_temp_free_i32(tmp);
99c475ab
FB
10269 break;
10270 case 2: /* mov/cpy */
396e467c
FN
10271 tmp = load_reg(s, rm);
10272 store_reg(s, rd, tmp);
99c475ab
FB
10273 break;
10274 case 3:/* branch [and link] exchange thumb register */
b0109805 10275 tmp = load_reg(s, rm);
99c475ab 10276 if (insn & (1 << 7)) {
be5e7a76 10277 ARCH(5);
99c475ab 10278 val = (uint32_t)s->pc | 1;
7d1b0095 10279 tmp2 = tcg_temp_new_i32();
b0109805
PB
10280 tcg_gen_movi_i32(tmp2, val);
10281 store_reg(s, 14, tmp2);
99c475ab 10282 }
be5e7a76 10283 /* already thumb, no need to check */
d9ba4830 10284 gen_bx(s, tmp);
99c475ab
FB
10285 break;
10286 }
10287 break;
10288 }
10289
10290 /* data processing register */
10291 rd = insn & 7;
10292 rm = (insn >> 3) & 7;
10293 op = (insn >> 6) & 0xf;
10294 if (op == 2 || op == 3 || op == 4 || op == 7) {
10295 /* the shift/rotate ops want the operands backwards */
10296 val = rm;
10297 rm = rd;
10298 rd = val;
10299 val = 1;
10300 } else {
10301 val = 0;
10302 }
10303
396e467c 10304 if (op == 9) { /* neg */
7d1b0095 10305 tmp = tcg_temp_new_i32();
396e467c
FN
10306 tcg_gen_movi_i32(tmp, 0);
10307 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10308 tmp = load_reg(s, rd);
10309 } else {
39d5492a 10310 TCGV_UNUSED_I32(tmp);
396e467c 10311 }
99c475ab 10312
396e467c 10313 tmp2 = load_reg(s, rm);
5899f386 10314 switch (op) {
99c475ab 10315 case 0x0: /* and */
396e467c 10316 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10317 if (!s->condexec_mask)
396e467c 10318 gen_logic_CC(tmp);
99c475ab
FB
10319 break;
10320 case 0x1: /* eor */
396e467c 10321 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10322 if (!s->condexec_mask)
396e467c 10323 gen_logic_CC(tmp);
99c475ab
FB
10324 break;
10325 case 0x2: /* lsl */
9ee6e8bb 10326 if (s->condexec_mask) {
365af80e 10327 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10328 } else {
9ef39277 10329 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10330 gen_logic_CC(tmp2);
9ee6e8bb 10331 }
99c475ab
FB
10332 break;
10333 case 0x3: /* lsr */
9ee6e8bb 10334 if (s->condexec_mask) {
365af80e 10335 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10336 } else {
9ef39277 10337 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10338 gen_logic_CC(tmp2);
9ee6e8bb 10339 }
99c475ab
FB
10340 break;
10341 case 0x4: /* asr */
9ee6e8bb 10342 if (s->condexec_mask) {
365af80e 10343 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10344 } else {
9ef39277 10345 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10346 gen_logic_CC(tmp2);
9ee6e8bb 10347 }
99c475ab
FB
10348 break;
10349 case 0x5: /* adc */
49b4c31e 10350 if (s->condexec_mask) {
396e467c 10351 gen_adc(tmp, tmp2);
49b4c31e
RH
10352 } else {
10353 gen_adc_CC(tmp, tmp, tmp2);
10354 }
99c475ab
FB
10355 break;
10356 case 0x6: /* sbc */
2de68a49 10357 if (s->condexec_mask) {
396e467c 10358 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10359 } else {
10360 gen_sbc_CC(tmp, tmp, tmp2);
10361 }
99c475ab
FB
10362 break;
10363 case 0x7: /* ror */
9ee6e8bb 10364 if (s->condexec_mask) {
f669df27
AJ
10365 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10366 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10367 } else {
9ef39277 10368 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10369 gen_logic_CC(tmp2);
9ee6e8bb 10370 }
99c475ab
FB
10371 break;
10372 case 0x8: /* tst */
396e467c
FN
10373 tcg_gen_and_i32(tmp, tmp, tmp2);
10374 gen_logic_CC(tmp);
99c475ab 10375 rd = 16;
5899f386 10376 break;
99c475ab 10377 case 0x9: /* neg */
9ee6e8bb 10378 if (s->condexec_mask)
396e467c 10379 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10380 else
72485ec4 10381 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10382 break;
10383 case 0xa: /* cmp */
72485ec4 10384 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10385 rd = 16;
10386 break;
10387 case 0xb: /* cmn */
72485ec4 10388 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10389 rd = 16;
10390 break;
10391 case 0xc: /* orr */
396e467c 10392 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10393 if (!s->condexec_mask)
396e467c 10394 gen_logic_CC(tmp);
99c475ab
FB
10395 break;
10396 case 0xd: /* mul */
7b2919a0 10397 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10398 if (!s->condexec_mask)
396e467c 10399 gen_logic_CC(tmp);
99c475ab
FB
10400 break;
10401 case 0xe: /* bic */
f669df27 10402 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10403 if (!s->condexec_mask)
396e467c 10404 gen_logic_CC(tmp);
99c475ab
FB
10405 break;
10406 case 0xf: /* mvn */
396e467c 10407 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10408 if (!s->condexec_mask)
396e467c 10409 gen_logic_CC(tmp2);
99c475ab 10410 val = 1;
5899f386 10411 rm = rd;
99c475ab
FB
10412 break;
10413 }
10414 if (rd != 16) {
396e467c
FN
10415 if (val) {
10416 store_reg(s, rm, tmp2);
10417 if (op != 0xf)
7d1b0095 10418 tcg_temp_free_i32(tmp);
396e467c
FN
10419 } else {
10420 store_reg(s, rd, tmp);
7d1b0095 10421 tcg_temp_free_i32(tmp2);
396e467c
FN
10422 }
10423 } else {
7d1b0095
PM
10424 tcg_temp_free_i32(tmp);
10425 tcg_temp_free_i32(tmp2);
99c475ab
FB
10426 }
10427 break;
10428
10429 case 5:
10430 /* load/store register offset. */
10431 rd = insn & 7;
10432 rn = (insn >> 3) & 7;
10433 rm = (insn >> 6) & 7;
10434 op = (insn >> 9) & 7;
b0109805 10435 addr = load_reg(s, rn);
b26eefb6 10436 tmp = load_reg(s, rm);
b0109805 10437 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10438 tcg_temp_free_i32(tmp);
99c475ab 10439
c40c8556 10440 if (op < 3) { /* store */
b0109805 10441 tmp = load_reg(s, rd);
c40c8556
PM
10442 } else {
10443 tmp = tcg_temp_new_i32();
10444 }
99c475ab
FB
10445
10446 switch (op) {
10447 case 0: /* str */
6ce2faf4 10448 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10449 break;
10450 case 1: /* strh */
6ce2faf4 10451 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10452 break;
10453 case 2: /* strb */
6ce2faf4 10454 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10455 break;
10456 case 3: /* ldrsb */
6ce2faf4 10457 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10458 break;
10459 case 4: /* ldr */
6ce2faf4 10460 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10461 break;
10462 case 5: /* ldrh */
6ce2faf4 10463 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10464 break;
10465 case 6: /* ldrb */
6ce2faf4 10466 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10467 break;
10468 case 7: /* ldrsh */
6ce2faf4 10469 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10470 break;
10471 }
c40c8556 10472 if (op >= 3) { /* load */
b0109805 10473 store_reg(s, rd, tmp);
c40c8556
PM
10474 } else {
10475 tcg_temp_free_i32(tmp);
10476 }
7d1b0095 10477 tcg_temp_free_i32(addr);
99c475ab
FB
10478 break;
10479
10480 case 6:
10481 /* load/store word immediate offset */
10482 rd = insn & 7;
10483 rn = (insn >> 3) & 7;
b0109805 10484 addr = load_reg(s, rn);
99c475ab 10485 val = (insn >> 4) & 0x7c;
b0109805 10486 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10487
10488 if (insn & (1 << 11)) {
10489 /* load */
c40c8556 10490 tmp = tcg_temp_new_i32();
6ce2faf4 10491 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10492 store_reg(s, rd, tmp);
99c475ab
FB
10493 } else {
10494 /* store */
b0109805 10495 tmp = load_reg(s, rd);
6ce2faf4 10496 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10497 tcg_temp_free_i32(tmp);
99c475ab 10498 }
7d1b0095 10499 tcg_temp_free_i32(addr);
99c475ab
FB
10500 break;
10501
10502 case 7:
10503 /* load/store byte immediate offset */
10504 rd = insn & 7;
10505 rn = (insn >> 3) & 7;
b0109805 10506 addr = load_reg(s, rn);
99c475ab 10507 val = (insn >> 6) & 0x1f;
b0109805 10508 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10509
10510 if (insn & (1 << 11)) {
10511 /* load */
c40c8556 10512 tmp = tcg_temp_new_i32();
6ce2faf4 10513 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10514 store_reg(s, rd, tmp);
99c475ab
FB
10515 } else {
10516 /* store */
b0109805 10517 tmp = load_reg(s, rd);
6ce2faf4 10518 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10519 tcg_temp_free_i32(tmp);
99c475ab 10520 }
7d1b0095 10521 tcg_temp_free_i32(addr);
99c475ab
FB
10522 break;
10523
10524 case 8:
10525 /* load/store halfword immediate offset */
10526 rd = insn & 7;
10527 rn = (insn >> 3) & 7;
b0109805 10528 addr = load_reg(s, rn);
99c475ab 10529 val = (insn >> 5) & 0x3e;
b0109805 10530 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10531
10532 if (insn & (1 << 11)) {
10533 /* load */
c40c8556 10534 tmp = tcg_temp_new_i32();
6ce2faf4 10535 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10536 store_reg(s, rd, tmp);
99c475ab
FB
10537 } else {
10538 /* store */
b0109805 10539 tmp = load_reg(s, rd);
6ce2faf4 10540 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10541 tcg_temp_free_i32(tmp);
99c475ab 10542 }
7d1b0095 10543 tcg_temp_free_i32(addr);
99c475ab
FB
10544 break;
10545
10546 case 9:
10547 /* load/store from stack */
10548 rd = (insn >> 8) & 7;
b0109805 10549 addr = load_reg(s, 13);
99c475ab 10550 val = (insn & 0xff) * 4;
b0109805 10551 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10552
10553 if (insn & (1 << 11)) {
10554 /* load */
c40c8556 10555 tmp = tcg_temp_new_i32();
6ce2faf4 10556 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10557 store_reg(s, rd, tmp);
99c475ab
FB
10558 } else {
10559 /* store */
b0109805 10560 tmp = load_reg(s, rd);
6ce2faf4 10561 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10562 tcg_temp_free_i32(tmp);
99c475ab 10563 }
7d1b0095 10564 tcg_temp_free_i32(addr);
99c475ab
FB
10565 break;
10566
10567 case 10:
10568 /* add to high reg */
10569 rd = (insn >> 8) & 7;
5899f386
FB
10570 if (insn & (1 << 11)) {
10571 /* SP */
5e3f878a 10572 tmp = load_reg(s, 13);
5899f386
FB
10573 } else {
10574 /* PC. bit 1 is ignored. */
7d1b0095 10575 tmp = tcg_temp_new_i32();
5e3f878a 10576 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10577 }
99c475ab 10578 val = (insn & 0xff) * 4;
5e3f878a
PB
10579 tcg_gen_addi_i32(tmp, tmp, val);
10580 store_reg(s, rd, tmp);
99c475ab
FB
10581 break;
10582
10583 case 11:
10584 /* misc */
10585 op = (insn >> 8) & 0xf;
10586 switch (op) {
10587 case 0:
10588 /* adjust stack pointer */
b26eefb6 10589 tmp = load_reg(s, 13);
99c475ab
FB
10590 val = (insn & 0x7f) * 4;
10591 if (insn & (1 << 7))
6a0d8a1d 10592 val = -(int32_t)val;
b26eefb6
PB
10593 tcg_gen_addi_i32(tmp, tmp, val);
10594 store_reg(s, 13, tmp);
99c475ab
FB
10595 break;
10596
9ee6e8bb
PB
10597 case 2: /* sign/zero extend. */
10598 ARCH(6);
10599 rd = insn & 7;
10600 rm = (insn >> 3) & 7;
b0109805 10601 tmp = load_reg(s, rm);
9ee6e8bb 10602 switch ((insn >> 6) & 3) {
b0109805
PB
10603 case 0: gen_sxth(tmp); break;
10604 case 1: gen_sxtb(tmp); break;
10605 case 2: gen_uxth(tmp); break;
10606 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10607 }
b0109805 10608 store_reg(s, rd, tmp);
9ee6e8bb 10609 break;
99c475ab
FB
10610 case 4: case 5: case 0xc: case 0xd:
10611 /* push/pop */
b0109805 10612 addr = load_reg(s, 13);
5899f386
FB
10613 if (insn & (1 << 8))
10614 offset = 4;
99c475ab 10615 else
5899f386
FB
10616 offset = 0;
10617 for (i = 0; i < 8; i++) {
10618 if (insn & (1 << i))
10619 offset += 4;
10620 }
10621 if ((insn & (1 << 11)) == 0) {
b0109805 10622 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10623 }
99c475ab
FB
10624 for (i = 0; i < 8; i++) {
10625 if (insn & (1 << i)) {
10626 if (insn & (1 << 11)) {
10627 /* pop */
c40c8556 10628 tmp = tcg_temp_new_i32();
6ce2faf4 10629 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10630 store_reg(s, i, tmp);
99c475ab
FB
10631 } else {
10632 /* push */
b0109805 10633 tmp = load_reg(s, i);
6ce2faf4 10634 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10635 tcg_temp_free_i32(tmp);
99c475ab 10636 }
5899f386 10637 /* advance to the next address. */
b0109805 10638 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10639 }
10640 }
39d5492a 10641 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10642 if (insn & (1 << 8)) {
10643 if (insn & (1 << 11)) {
10644 /* pop pc */
c40c8556 10645 tmp = tcg_temp_new_i32();
6ce2faf4 10646 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10647 /* don't set the pc until the rest of the instruction
10648 has completed */
10649 } else {
10650 /* push lr */
b0109805 10651 tmp = load_reg(s, 14);
6ce2faf4 10652 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10653 tcg_temp_free_i32(tmp);
99c475ab 10654 }
b0109805 10655 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10656 }
5899f386 10657 if ((insn & (1 << 11)) == 0) {
b0109805 10658 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10659 }
99c475ab 10660 /* write back the new stack pointer */
b0109805 10661 store_reg(s, 13, addr);
99c475ab 10662 /* set the new PC value */
be5e7a76
DES
10663 if ((insn & 0x0900) == 0x0900) {
10664 store_reg_from_load(env, s, 15, tmp);
10665 }
99c475ab
FB
10666 break;
10667
9ee6e8bb
PB
10668 case 1: case 3: case 9: case 11: /* czb */
10669 rm = insn & 7;
d9ba4830 10670 tmp = load_reg(s, rm);
9ee6e8bb
PB
10671 s->condlabel = gen_new_label();
10672 s->condjmp = 1;
10673 if (insn & (1 << 11))
cb63669a 10674 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10675 else
cb63669a 10676 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10677 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10678 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10679 val = (uint32_t)s->pc + 2;
10680 val += offset;
10681 gen_jmp(s, val);
10682 break;
10683
10684 case 15: /* IT, nop-hint. */
10685 if ((insn & 0xf) == 0) {
10686 gen_nop_hint(s, (insn >> 4) & 0xf);
10687 break;
10688 }
10689 /* If Then. */
10690 s->condexec_cond = (insn >> 4) & 0xe;
10691 s->condexec_mask = insn & 0x1f;
10692 /* No actual code generated for this insn, just setup state. */
10693 break;
10694
06c949e6 10695 case 0xe: /* bkpt */
d4a2dc67
PM
10696 {
10697 int imm8 = extract32(insn, 0, 8);
be5e7a76 10698 ARCH(5);
d4a2dc67 10699 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
06c949e6 10700 break;
d4a2dc67 10701 }
06c949e6 10702
9ee6e8bb
PB
10703 case 0xa: /* rev */
10704 ARCH(6);
10705 rn = (insn >> 3) & 0x7;
10706 rd = insn & 0x7;
b0109805 10707 tmp = load_reg(s, rn);
9ee6e8bb 10708 switch ((insn >> 6) & 3) {
66896cb8 10709 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10710 case 1: gen_rev16(tmp); break;
10711 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10712 default: goto illegal_op;
10713 }
b0109805 10714 store_reg(s, rd, tmp);
9ee6e8bb
PB
10715 break;
10716
d9e028c1
PM
10717 case 6:
10718 switch ((insn >> 5) & 7) {
10719 case 2:
10720 /* setend */
10721 ARCH(6);
10962fd5
PM
10722 if (((insn >> 3) & 1) != s->bswap_code) {
10723 /* Dynamic endianness switching not implemented. */
e0c270d9 10724 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10725 goto illegal_op;
10726 }
9ee6e8bb 10727 break;
d9e028c1
PM
10728 case 3:
10729 /* cps */
10730 ARCH(6);
10731 if (IS_USER(s)) {
10732 break;
8984bd2e 10733 }
d9e028c1
PM
10734 if (IS_M(env)) {
10735 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10736 /* FAULTMASK */
10737 if (insn & 1) {
10738 addr = tcg_const_i32(19);
10739 gen_helper_v7m_msr(cpu_env, addr, tmp);
10740 tcg_temp_free_i32(addr);
10741 }
10742 /* PRIMASK */
10743 if (insn & 2) {
10744 addr = tcg_const_i32(16);
10745 gen_helper_v7m_msr(cpu_env, addr, tmp);
10746 tcg_temp_free_i32(addr);
10747 }
10748 tcg_temp_free_i32(tmp);
10749 gen_lookup_tb(s);
10750 } else {
10751 if (insn & (1 << 4)) {
10752 shift = CPSR_A | CPSR_I | CPSR_F;
10753 } else {
10754 shift = 0;
10755 }
10756 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10757 }
d9e028c1
PM
10758 break;
10759 default:
10760 goto undef;
9ee6e8bb
PB
10761 }
10762 break;
10763
99c475ab
FB
10764 default:
10765 goto undef;
10766 }
10767 break;
10768
10769 case 12:
a7d3970d 10770 {
99c475ab 10771 /* load/store multiple */
39d5492a
PM
10772 TCGv_i32 loaded_var;
10773 TCGV_UNUSED_I32(loaded_var);
99c475ab 10774 rn = (insn >> 8) & 0x7;
b0109805 10775 addr = load_reg(s, rn);
99c475ab
FB
10776 for (i = 0; i < 8; i++) {
10777 if (insn & (1 << i)) {
99c475ab
FB
10778 if (insn & (1 << 11)) {
10779 /* load */
c40c8556 10780 tmp = tcg_temp_new_i32();
6ce2faf4 10781 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
10782 if (i == rn) {
10783 loaded_var = tmp;
10784 } else {
10785 store_reg(s, i, tmp);
10786 }
99c475ab
FB
10787 } else {
10788 /* store */
b0109805 10789 tmp = load_reg(s, i);
6ce2faf4 10790 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10791 tcg_temp_free_i32(tmp);
99c475ab 10792 }
5899f386 10793 /* advance to the next address */
b0109805 10794 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10795 }
10796 }
b0109805 10797 if ((insn & (1 << rn)) == 0) {
a7d3970d 10798 /* base reg not in list: base register writeback */
b0109805
PB
10799 store_reg(s, rn, addr);
10800 } else {
a7d3970d
PM
10801 /* base reg in list: if load, complete it now */
10802 if (insn & (1 << 11)) {
10803 store_reg(s, rn, loaded_var);
10804 }
7d1b0095 10805 tcg_temp_free_i32(addr);
b0109805 10806 }
99c475ab 10807 break;
a7d3970d 10808 }
99c475ab
FB
10809 case 13:
10810 /* conditional branch or swi */
10811 cond = (insn >> 8) & 0xf;
10812 if (cond == 0xe)
10813 goto undef;
10814
10815 if (cond == 0xf) {
10816 /* swi */
eaed129d 10817 gen_set_pc_im(s, s->pc);
d4a2dc67 10818 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10819 s->is_jmp = DISAS_SWI;
99c475ab
FB
10820 break;
10821 }
10822 /* generate a conditional jump to next instruction */
e50e6a20 10823 s->condlabel = gen_new_label();
39fb730a 10824 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10825 s->condjmp = 1;
99c475ab
FB
10826
10827 /* jump to the offset */
5899f386 10828 val = (uint32_t)s->pc + 2;
99c475ab 10829 offset = ((int32_t)insn << 24) >> 24;
5899f386 10830 val += offset << 1;
8aaca4c0 10831 gen_jmp(s, val);
99c475ab
FB
10832 break;
10833
10834 case 14:
358bf29e 10835 if (insn & (1 << 11)) {
9ee6e8bb
PB
10836 if (disas_thumb2_insn(env, s, insn))
10837 goto undef32;
358bf29e
PB
10838 break;
10839 }
9ee6e8bb 10840 /* unconditional branch */
99c475ab
FB
10841 val = (uint32_t)s->pc;
10842 offset = ((int32_t)insn << 21) >> 21;
10843 val += (offset << 1) + 2;
8aaca4c0 10844 gen_jmp(s, val);
99c475ab
FB
10845 break;
10846
10847 case 15:
9ee6e8bb 10848 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 10849 goto undef32;
9ee6e8bb 10850 break;
99c475ab
FB
10851 }
10852 return;
9ee6e8bb 10853undef32:
d4a2dc67 10854 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
10855 return;
10856illegal_op:
99c475ab 10857undef:
d4a2dc67 10858 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
99c475ab
FB
10859}
10860
2c0262af
FB
10861/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10862 basic block 'tb'. If search_pc is TRUE, also generate PC
10863 information for each intermediate instruction. */
5639c3f2 10864static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10865 TranslationBlock *tb,
5639c3f2 10866 bool search_pc)
2c0262af 10867{
ed2803da 10868 CPUState *cs = CPU(cpu);
5639c3f2 10869 CPUARMState *env = &cpu->env;
2c0262af 10870 DisasContext dc1, *dc = &dc1;
a1d1bb31 10871 CPUBreakpoint *bp;
2c0262af
FB
10872 uint16_t *gen_opc_end;
10873 int j, lj;
0fa85d43 10874 target_ulong pc_start;
0a2461fa 10875 target_ulong next_page_start;
2e70f6ef
PB
10876 int num_insns;
10877 int max_insns;
3b46e624 10878
2c0262af 10879 /* generate intermediate code */
40f860cd
PM
10880
10881 /* The A64 decoder has its own top level loop, because it doesn't need
10882 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10883 */
10884 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10885 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10886 return;
10887 }
10888
0fa85d43 10889 pc_start = tb->pc;
3b46e624 10890
2c0262af
FB
10891 dc->tb = tb;
10892
92414b31 10893 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10894
10895 dc->is_jmp = DISAS_NEXT;
10896 dc->pc = pc_start;
ed2803da 10897 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10898 dc->condjmp = 0;
3926cc84 10899
40f860cd
PM
10900 dc->aarch64 = 0;
10901 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10902 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10903 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10904 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 10905#if !defined(CONFIG_USER_ONLY)
40f860cd 10906 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 10907#endif
2c7ffc41 10908 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
40f860cd
PM
10909 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10910 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10911 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
60322b39
PM
10912 dc->cp_regs = cpu->cp_regs;
10913 dc->current_pl = arm_current_pl(env);
a984e42c 10914 dc->features = env->features;
40f860cd 10915
a7812ae4
PB
10916 cpu_F0s = tcg_temp_new_i32();
10917 cpu_F1s = tcg_temp_new_i32();
10918 cpu_F0d = tcg_temp_new_i64();
10919 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10920 cpu_V0 = cpu_F0d;
10921 cpu_V1 = cpu_F1d;
e677137d 10922 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10923 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10924 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10925 lj = -1;
2e70f6ef
PB
10926 num_insns = 0;
10927 max_insns = tb->cflags & CF_COUNT_MASK;
10928 if (max_insns == 0)
10929 max_insns = CF_COUNT_MASK;
10930
806f352d 10931 gen_tb_start();
e12ce78d 10932
3849902c
PM
10933 tcg_clear_temp_count();
10934
e12ce78d
PM
10935 /* A note on handling of the condexec (IT) bits:
10936 *
10937 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10938 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10939 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10940 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10941 * to do it at the end of the block. (For example if we don't do this
10942 * it's hard to identify whether we can safely skip writing condexec
10943 * at the end of the TB, which we definitely want to do for the case
10944 * where a TB doesn't do anything with the IT state at all.)
10945 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10946 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10947 * This is done both for leaving the TB at the end, and for leaving
10948 * it because of an exception we know will happen, which is done in
10949 * gen_exception_insn(). The latter is necessary because we need to
10950 * leave the TB with the PC/IT state just prior to execution of the
10951 * instruction which caused the exception.
10952 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10953 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10954 * This is handled in the same way as restoration of the
10955 * PC in these situations: we will be called again with search_pc=1
10956 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10957 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10958 * this to restore the condexec bits.
e12ce78d
PM
10959 *
10960 * Note that there are no instructions which can read the condexec
10961 * bits, and none which can write non-static values to them, so
0ecb72a5 10962 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10963 * middle of a TB.
10964 */
10965
9ee6e8bb
PB
10966 /* Reset the conditional execution bits immediately. This avoids
10967 complications trying to do it at the end of the block. */
98eac7ca 10968 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10969 {
39d5492a 10970 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10971 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10972 store_cpu_field(tmp, condexec_bits);
8f01245e 10973 }
2c0262af 10974 do {
fbb4a2e3
PB
10975#ifdef CONFIG_USER_ONLY
10976 /* Intercept jump to the magic kernel page. */
40f860cd 10977 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10978 /* We always get here via a jump, so know we are not in a
10979 conditional execution block. */
d4a2dc67 10980 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
10981 dc->is_jmp = DISAS_UPDATE;
10982 break;
10983 }
10984#else
9ee6e8bb
PB
10985 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10986 /* We always get here via a jump, so know we are not in a
10987 conditional execution block. */
d4a2dc67 10988 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10989 dc->is_jmp = DISAS_UPDATE;
10990 break;
9ee6e8bb
PB
10991 }
10992#endif
10993
f0c3c505
AF
10994 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
10995 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 10996 if (bp->pc == dc->pc) {
d4a2dc67 10997 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10998 /* Advance PC so that clearing the breakpoint will
10999 invalidate this TB. */
11000 dc->pc += 2;
11001 goto done_generating;
1fddef4b
FB
11002 }
11003 }
11004 }
2c0262af 11005 if (search_pc) {
92414b31 11006 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
11007 if (lj < j) {
11008 lj++;
11009 while (lj < j)
ab1103de 11010 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 11011 }
25983cad 11012 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 11013 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 11014 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 11015 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 11016 }
e50e6a20 11017
2e70f6ef
PB
11018 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11019 gen_io_start();
11020
fdefe51c 11021 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
11022 tcg_gen_debug_insn_start(dc->pc);
11023 }
11024
40f860cd 11025 if (dc->thumb) {
9ee6e8bb
PB
11026 disas_thumb_insn(env, dc);
11027 if (dc->condexec_mask) {
11028 dc->condexec_cond = (dc->condexec_cond & 0xe)
11029 | ((dc->condexec_mask >> 4) & 1);
11030 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11031 if (dc->condexec_mask == 0) {
11032 dc->condexec_cond = 0;
11033 }
11034 }
11035 } else {
11036 disas_arm_insn(env, dc);
11037 }
e50e6a20
FB
11038
11039 if (dc->condjmp && !dc->is_jmp) {
11040 gen_set_label(dc->condlabel);
11041 dc->condjmp = 0;
11042 }
3849902c
PM
11043
11044 if (tcg_check_temp_count()) {
0a2461fa
AG
11045 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11046 dc->pc);
3849902c
PM
11047 }
11048
aaf2d97d 11049 /* Translation stops when a conditional branch is encountered.
e50e6a20 11050 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11051 * Also stop translation when a page boundary is reached. This
bf20dc07 11052 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11053 num_insns ++;
efd7f486 11054 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 11055 !cs->singlestep_enabled &&
1b530a6d 11056 !singlestep &&
2e70f6ef
PB
11057 dc->pc < next_page_start &&
11058 num_insns < max_insns);
11059
11060 if (tb->cflags & CF_LAST_IO) {
11061 if (dc->condjmp) {
11062 /* FIXME: This can theoretically happen with self-modifying
11063 code. */
a47dddd7 11064 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11065 }
11066 gen_io_end();
11067 }
9ee6e8bb 11068
b5ff1b31 11069 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11070 instruction was a conditional branch or trap, and the PC has
11071 already been written. */
ed2803da 11072 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 11073 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11074 if (dc->condjmp) {
9ee6e8bb
PB
11075 gen_set_condexec(dc);
11076 if (dc->is_jmp == DISAS_SWI) {
d4a2dc67 11077 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11078 } else {
d4a2dc67 11079 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11080 }
e50e6a20
FB
11081 gen_set_label(dc->condlabel);
11082 }
11083 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11084 gen_set_pc_im(dc, dc->pc);
e50e6a20 11085 dc->condjmp = 0;
8aaca4c0 11086 }
9ee6e8bb
PB
11087 gen_set_condexec(dc);
11088 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d4a2dc67 11089 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb
PB
11090 } else {
11091 /* FIXME: Single stepping a WFI insn will not halt
11092 the CPU. */
d4a2dc67 11093 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11094 }
8aaca4c0 11095 } else {
9ee6e8bb
PB
11096 /* While branches must always occur at the end of an IT block,
11097 there are a few other things that can cause us to terminate
65626741 11098 the TB in the middle of an IT block:
9ee6e8bb
PB
11099 - Exception generating instructions (bkpt, swi, undefined).
11100 - Page boundaries.
11101 - Hardware watchpoints.
11102 Hardware breakpoints have already been handled and skip this code.
11103 */
11104 gen_set_condexec(dc);
8aaca4c0 11105 switch(dc->is_jmp) {
8aaca4c0 11106 case DISAS_NEXT:
6e256c93 11107 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11108 break;
11109 default:
11110 case DISAS_JUMP:
11111 case DISAS_UPDATE:
11112 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11113 tcg_gen_exit_tb(0);
8aaca4c0
FB
11114 break;
11115 case DISAS_TB_JUMP:
11116 /* nothing more to generate */
11117 break;
9ee6e8bb 11118 case DISAS_WFI:
1ce94f81 11119 gen_helper_wfi(cpu_env);
9ee6e8bb 11120 break;
72c1d3af
PM
11121 case DISAS_WFE:
11122 gen_helper_wfe(cpu_env);
11123 break;
9ee6e8bb 11124 case DISAS_SWI:
d4a2dc67 11125 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11126 break;
8aaca4c0 11127 }
e50e6a20
FB
11128 if (dc->condjmp) {
11129 gen_set_label(dc->condlabel);
9ee6e8bb 11130 gen_set_condexec(dc);
6e256c93 11131 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11132 dc->condjmp = 0;
11133 }
2c0262af 11134 }
2e70f6ef 11135
9ee6e8bb 11136done_generating:
806f352d 11137 gen_tb_end(tb, num_insns);
efd7f486 11138 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
11139
11140#ifdef DEBUG_DISAS
8fec2b8c 11141 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11142 qemu_log("----------------\n");
11143 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11144 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11145 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11146 qemu_log("\n");
2c0262af
FB
11147 }
11148#endif
b5ff1b31 11149 if (search_pc) {
92414b31 11150 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
11151 lj++;
11152 while (lj <= j)
ab1103de 11153 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11154 } else {
2c0262af 11155 tb->size = dc->pc - pc_start;
2e70f6ef 11156 tb->icount = num_insns;
b5ff1b31 11157 }
2c0262af
FB
11158}
11159
0ecb72a5 11160void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11161{
5639c3f2 11162 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11163}
11164
0ecb72a5 11165void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11166{
5639c3f2 11167 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11168}
11169
b5ff1b31 11170static const char *cpu_mode_names[16] = {
28c9457d
EI
11171 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11172 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11173};
9ee6e8bb 11174
878096ee
AF
11175void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11176 int flags)
2c0262af 11177{
878096ee
AF
11178 ARMCPU *cpu = ARM_CPU(cs);
11179 CPUARMState *env = &cpu->env;
2c0262af 11180 int i;
b5ff1b31 11181 uint32_t psr;
2c0262af 11182
17731115
PM
11183 if (is_a64(env)) {
11184 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11185 return;
11186 }
11187
2c0262af 11188 for(i=0;i<16;i++) {
7fe48483 11189 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11190 if ((i % 4) == 3)
7fe48483 11191 cpu_fprintf(f, "\n");
2c0262af 11192 else
7fe48483 11193 cpu_fprintf(f, " ");
2c0262af 11194 }
b5ff1b31 11195 psr = cpsr_read(env);
687fa640
TS
11196 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11197 psr,
b5ff1b31
FB
11198 psr & (1 << 31) ? 'N' : '-',
11199 psr & (1 << 30) ? 'Z' : '-',
11200 psr & (1 << 29) ? 'C' : '-',
11201 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11202 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11203 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11204
f2617cfc
PM
11205 if (flags & CPU_DUMP_FPU) {
11206 int numvfpregs = 0;
11207 if (arm_feature(env, ARM_FEATURE_VFP)) {
11208 numvfpregs += 16;
11209 }
11210 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11211 numvfpregs += 16;
11212 }
11213 for (i = 0; i < numvfpregs; i++) {
11214 uint64_t v = float64_val(env->vfp.regs[i]);
11215 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11216 i * 2, (uint32_t)v,
11217 i * 2 + 1, (uint32_t)(v >> 32),
11218 i, v);
11219 }
11220 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11221 }
2c0262af 11222}
a6b025d3 11223
0ecb72a5 11224void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11225{
3926cc84
AG
11226 if (is_a64(env)) {
11227 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11228 env->condexec_bits = 0;
3926cc84
AG
11229 } else {
11230 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11231 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11232 }
d2856f1a 11233}
This page took 2.980249 seconds and 4 git commands to generate.