]> Git Repo - qemu.git/blame - target-arm/translate.c
target-arm: Report a valid L1Ip field in CTR_EL0 for CPU type "any"
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
a7e30d84
LV
38#include "trace-tcg.h"
39
40
be5e7a76
DES
41#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
43/* currently all emulated v5 cores are also v5TE, so don't bother */
44#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
45#define ENABLE_ARCH_5J 0
46#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 50#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d
PM
55static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
56
b5ff1b31
FB
57#if defined(CONFIG_USER_ONLY)
58#define IS_USER(s) 1
59#else
60#define IS_USER(s) (s->user)
61#endif
62
3407ad0e 63TCGv_ptr cpu_env;
ad69471c 64/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 65static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 66static TCGv_i32 cpu_R[16];
66c374de 67static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
68static TCGv_i64 cpu_exclusive_addr;
69static TCGv_i64 cpu_exclusive_val;
426f5abc 70#ifdef CONFIG_USER_ONLY
03d05e2d 71static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
72static TCGv_i32 cpu_exclusive_info;
73#endif
ad69471c 74
b26eefb6 75/* FIXME: These should be removed. */
39d5492a 76static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 77static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 78
022c62cb 79#include "exec/gen-icount.h"
2e70f6ef 80
155c3eac
FN
81static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* The 16 AArch32 core registers live in CPUARMState.regs[]; expose
     * each one as a named TCG global so generated code can use it directly.
     */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* The NZCV flags are kept in separate per-flag globals rather than in
     * packed CPSR form; see gen_set_cpsr/gen_logic_CC for how they are used.
     */
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    /* State for exclusive load/store (LDREX/STREX) emulation.  */
    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
115
/* Return a new temporary loaded from the given byte offset into cpu_env. */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}
122
0ecb72a5 123#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 124
/* Store var at the given byte offset into cpu_env; var is freed (dead). */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
130
131#define store_cpu_field(var, name) \
0ecb72a5 132 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 133
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        /* Reads of PC return the address of the current insn plus the
         * architectural pipeline offset: +4 in Thumb (we have already
         * advanced s->pc past the 2-byte insn), +8 in ARM.
         */
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
149
/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
157
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* A direct write to PC clears bit 0 (no interworking here) and
         * ends the TB so the new PC takes effect.
         */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
169
b26eefb6 170/* Value extensions. */
86831435
PB
171#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
172#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
173#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
174#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
175
1497c961
PB
176#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
177#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 178
b26eefb6 179
/* Write the bits of var selected by mask into the CPSR via the helper,
 * which keeps the split-out flag globals in sync.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
d9ba4830
PB
186/* Set NZCV flags from the high 4 bits of var. */
187#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
188
/* Raise a QEMU-internal exception (one with no architectural syndrome). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
197
/* Raise an architectural exception with the given syndrome value. */
static void gen_exception(int excp, uint32_t syndrome)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
207
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
218
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
    s->is_jmp = DISAS_EXC;
}
234
/* Dual 16x16->32 signed multiply: on return a = lo16(a)*lo16(b) and
 * b = hi16(a)*hi16(b), both as signed products.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low-half product first, computed into temporaries so the
     * high-half shift below still sees the original operands.
     */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
249
/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Swap bytes within each 16-bit lane using shift-and-mask. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
261
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
269
/* Unsigned bitfield extract: var = (var >> shift) & mask.
 * The caller supplies the already-computed field mask.
 */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
277
/* Signed bitfield extract: var = sign_extend(var<shift+width-1:shift>). */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend the width-bit field via the xor/sub trick:
         * (x ^ signbit) - signbit propagates the field's top bit.
         */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
292
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
306
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
320
/* 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply, then glue the two halves together. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
339
/* Signed 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
357
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
367
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Masking out bit 15 of each operand keeps the carry from the low
     * halfword from propagating into the high halfword; the xor restores
     * the correct top bit of each 16-bit lane.
     */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
387
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
393
/* Set N and Z flags from var.  (NF holds the value itself; ZF is tested
   against zero, so a plain copy suffices for both.) */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
400
401/* T0 += T1 + CF. */
39d5492a 402static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 403{
396e467c 404 tcg_gen_add_i32(t0, t0, t1);
66c374de 405 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
406}
407
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
414
/* dest = T0 - T1 + CF - 1.  (ARM subtract-with-carry semantics:
   carry set means "no borrow".) */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
422
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 produces the 33-bit sum: NF gets the low word, CF the carry. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Overflow iff operands have the same sign and the result differs:
     * VF = (result ^ t0) & ~(t0 ^ t1), tested via its top bit.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
436
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first t0 + CF, then + t1, accumulating
         * the carry out in CF each time.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the 33-bit sum in a 64-bit temp and split it. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Same signed-overflow computation as gen_add_CC. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
464
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry means "no borrow": CF = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* Overflow iff operands have different signs and the result's sign
     * differs from t0: VF = (result ^ t0) & (t0 ^ t1).
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
479
/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags.
   (Subtract-with-carry expressed as add-with-carry of the complement.) */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
488
/* Generate gen_shl/gen_shr for register-specified shifts.  The ARM
 * semantics use the low 8 bits of the shift amount; counts >= 32
 * produce 0, which the movcond below selects before masking the
 * count down to 5 bits for the host shift op.
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
507
/* Register-specified arithmetic shift right.  Counts >= 32 are clamped
 * to 31, which gives the architecturally correct all-sign-bits result.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
519
/* dest = |src| via movcond: select src when src > 0, else -src.
 * NOTE(review): |INT32_MIN| stays INT32_MIN (two's-complement wrap),
 * matching the guest's expected behavior.
 */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 529
/* Set CF to bit `shift` of var (the shifter carry-out). */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shri already isolated the bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 541
9a119ff6 542/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
543static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
544 int shift, int flags)
9a119ff6
PB
545{
546 switch (shiftop) {
547 case 0: /* LSL */
548 if (shift != 0) {
549 if (flags)
550 shifter_out_im(var, 32 - shift);
551 tcg_gen_shli_i32(var, var, shift);
552 }
553 break;
554 case 1: /* LSR */
555 if (shift == 0) {
556 if (flags) {
66c374de 557 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
558 }
559 tcg_gen_movi_i32(var, 0);
560 } else {
561 if (flags)
562 shifter_out_im(var, shift - 1);
563 tcg_gen_shri_i32(var, var, shift);
564 }
565 break;
566 case 2: /* ASR */
567 if (shift == 0)
568 shift = 32;
569 if (flags)
570 shifter_out_im(var, shift - 1);
571 if (shift == 32)
572 shift = 31;
573 tcg_gen_sari_i32(var, var, shift);
574 break;
575 case 3: /* ROR/RRX */
576 if (shift != 0) {
577 if (flags)
578 shifter_out_im(var, shift - 1);
f669df27 579 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 580 } else {
39d5492a 581 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 582 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
583 if (flags)
584 shifter_out_im(var, 0);
585 tcg_gen_shri_i32(var, var, 1);
b26eefb6 586 tcg_gen_or_i32(var, var, tmp);
7d1b0095 587 tcg_temp_free_i32(tmp);
b26eefb6
PB
588 }
589 }
590};
591
39d5492a
PM
/* Shift by a register-supplied amount; shift is consumed.  When flags
 * is set the flag-updating helpers are used so CF is computed too.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
619
6ddbc6e4
PB
/* Dispatch table (by op2) for the ARM parallel add/subtract insns;
 * PAS_OP expands to a switch calling the helper for one prefix family.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
/* Emit a parallel add/subtract for the ARM encoding: op1 selects the
 * saturation/halving family, op2 the operation within it.  The signed
 * and unsigned (s/u) families additionally update the GE bits, so
 * those helpers get a pointer to env->GE.
 */
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
665
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
/* Thumb-2 variant of gen_arm_parallel_addsub: here op2 selects the
 * family and op1 the operation (the roles are swapped vs. ARM).
 */
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
712
39fb730a
AG
/*
 * generate a conditional branch based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        /* Branch around the taken-branch when the first term fails. */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        /* Condition 14 (always) and 15 must be handled by the caller. */
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}
2c0262af 790
/* Per-dataprocessing-opcode flag: 1 if the opcode is a logical operation
 * (sets NZ from the result and C from the shifter), 0 if arithmetic.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 809
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    /* Bit 0 of the target selects the instruction set; only store the
     * new Thumb state when it differs from the current one.
     */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
824
/* Set PC and Thumb state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
833
21aeb343
JR
834/* Variant of store_reg which uses branch&exchange logic when storing
835 to r15 in ARM architecture v7 and above. The source must be a temporary
836 and will be marked as dead. */
0ecb72a5 837static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 838 int reg, TCGv_i32 var)
21aeb343
JR
839{
840 if (reg == 15 && ENABLE_ARCH_7) {
841 gen_bx(s, var);
842 } else {
843 store_reg(s, reg, var);
844 }
845}
846
be5e7a76
DES
847/* Variant of store_reg which uses branch&exchange logic when storing
848 * to r15 in ARM architecture v5T and above. This is used for storing
849 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
850 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 851static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 852 int reg, TCGv_i32 var)
be5e7a76
DES
853{
854 if (reg == 15 && ENABLE_ARCH_5) {
855 gen_bx(s, var);
856 } else {
857 store_reg(s, reg, var);
858 }
859}
860
08307563
PM
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

/* 32-bit guest addresses: pass the i32 address straight through. */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_st_i32(val, addr, index, OPC); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

/* 64-bit guest addresses: zero-extend the 32-bit AArch32 address into
 * a temporary TCGv (i64) before the access.
 */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

/* Instantiate the sized/signed access generators used by the decoder. */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
08307563 938
/* Load an immediate program-counter value into r15. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
943
37e6456e
PM
/* Generate an HVC (hypervisor call) with the given 16-bit immediate. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
961
/* Generate code for an SMC (secure monitor call) insn.  */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
976
/* If we are inside an IT block, sync the condition and remaining mask
 * back into the CPUState condexec_bits field.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
987
/* Raise a QEMU-internal exception EXCP for the current insn, winding
 * the PC back by OFFSET bytes and syncing condexec state first.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
995
/* Raise an architectural exception EXCP with syndrome SYN for the
 * current insn, winding the PC back by OFFSET bytes.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn);
    s->is_jmp = DISAS_JUMP;
}
1003
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Mask off bit 0 of the PC value before writing it to r15. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
1010
b0109805 1011static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1012 TCGv_i32 var)
2c0262af 1013{
1e8d4eec 1014 int val, rm, shift, shiftop;
39d5492a 1015 TCGv_i32 offset;
2c0262af
FB
1016
1017 if (!(insn & (1 << 25))) {
1018 /* immediate */
1019 val = insn & 0xfff;
1020 if (!(insn & (1 << 23)))
1021 val = -val;
537730b9 1022 if (val != 0)
b0109805 1023 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1024 } else {
1025 /* shift/register */
1026 rm = (insn) & 0xf;
1027 shift = (insn >> 7) & 0x1f;
1e8d4eec 1028 shiftop = (insn >> 5) & 3;
b26eefb6 1029 offset = load_reg(s, rm);
9a119ff6 1030 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1031 if (!(insn & (1 << 23)))
b0109805 1032 tcg_gen_sub_i32(var, var, offset);
2c0262af 1033 else
b0109805 1034 tcg_gen_add_i32(var, var, offset);
7d1b0095 1035 tcg_temp_free_i32(offset);
2c0262af
FB
1036 }
1037}
1038
191f9a93 1039static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1040 int extra, TCGv_i32 var)
2c0262af
FB
1041{
1042 int val, rm;
39d5492a 1043 TCGv_i32 offset;
3b46e624 1044
2c0262af
FB
1045 if (insn & (1 << 22)) {
1046 /* immediate */
1047 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1048 if (!(insn & (1 << 23)))
1049 val = -val;
18acad92 1050 val += extra;
537730b9 1051 if (val != 0)
b0109805 1052 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1053 } else {
1054 /* register */
191f9a93 1055 if (extra)
b0109805 1056 tcg_gen_addi_i32(var, var, extra);
2c0262af 1057 rm = (insn) & 0xf;
b26eefb6 1058 offset = load_reg(s, rm);
2c0262af 1059 if (!(insn & (1 << 23)))
b0109805 1060 tcg_gen_sub_i32(var, var, offset);
2c0262af 1061 else
b0109805 1062 tcg_gen_add_i32(var, var, offset);
7d1b0095 1063 tcg_temp_free_i32(offset);
2c0262af
FB
1064 }
1065}
1066
5aaebd13
PM
1067static TCGv_ptr get_fpstatus_ptr(int neon)
1068{
1069 TCGv_ptr statusptr = tcg_temp_new_ptr();
1070 int offset;
1071 if (neon) {
0ecb72a5 1072 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1073 } else {
0ecb72a5 1074 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1075 }
1076 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1077 return statusptr;
1078}
1079
/* Define gen_vfp_<name>(dp): F0 = F0 <op> F1 using the normal VFP
 * fp_status, for single or double precision as selected by dp.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}
1091
/* Instantiate the basic VFP two-operand arithmetic ops.  */
VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1098
/* F1 = F0 * F1 (single or double precision).  */
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1110
/* F1 = -F0 (single or double precision).  */
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1120
4373f3ce
PB
1121static inline void gen_vfp_abs(int dp)
1122{
1123 if (dp)
1124 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1125 else
1126 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1127}
1128
1129static inline void gen_vfp_neg(int dp)
1130{
1131 if (dp)
1132 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1133 else
1134 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1135}
1136
1137static inline void gen_vfp_sqrt(int dp)
1138{
1139 if (dp)
1140 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1141 else
1142 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1143}
1144
1145static inline void gen_vfp_cmp(int dp)
1146{
1147 if (dp)
1148 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1149 else
1150 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1151}
1152
1153static inline void gen_vfp_cmpe(int dp)
1154{
1155 if (dp)
1156 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1157 else
1158 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1159}
1160
/* Load the constant zero into F1 (single or double precision).  */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1168
/* Define gen_vfp_<name>(dp, neon): integer-to-float conversion of F0.
 * neon selects the Neon "standard FP" status instead of fp_status.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1184
/* Define gen_vfp_<name>(dp, neon): float-to-integer conversion of F0
 * (the 32-bit integer result always lands in cpu_F0s).
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1202
/* Define gen_vfp_<name>(dp, shift, neon): fixed-point conversion of F0
 * with the given fractional shift; "round" selects the rounding-variant
 * helper suffix (empty or _round_to_zero).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1227
/* Load F0 (double if dp, else single) from the guest address ADDR.  */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
    }
}
1236
/* Store F0 (double if dp, else single) to the guest address ADDR.  */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
    }
}
1245
8e96005d
FB
1246static inline long
1247vfp_reg_offset (int dp, int reg)
1248{
1249 if (dp)
1250 return offsetof(CPUARMState, vfp.regs[reg]);
1251 else if (reg & 1) {
1252 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1253 + offsetof(CPU_DoubleU, l.upper);
1254 } else {
1255 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1256 + offsetof(CPU_DoubleU, l.lower);
1257 }
1258}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, 2 * reg + n);
}
1269
/* Return a new temp holding the 32-bit piece (reg, pass) of a NEON
 * register.  Caller frees the temp.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1276
/* Store VAR into the 32-bit piece (reg, pass) of a NEON register.
 * Consumes (frees) VAR.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1282
/* Load 64-bit NEON/VFP double register REG into VAR.  */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1287
/* Store VAR into 64-bit NEON/VFP double register REG.  Does not free VAR. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1292
/* VFP "float" loads/stores are just the integer ops of the same width;
 * these aliases keep the VFP move helpers below readable.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1298static inline void gen_mov_F0_vreg(int dp, int reg)
1299{
1300 if (dp)
4373f3ce 1301 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1302 else
4373f3ce 1303 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1304}
1305
1306static inline void gen_mov_F1_vreg(int dp, int reg)
1307{
1308 if (dp)
4373f3ce 1309 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1310 else
4373f3ce 1311 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1312}
1313
1314static inline void gen_mov_vreg_F0(int dp, int reg)
1315{
1316 if (dp)
4373f3ce 1317 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1318 else
4373f3ce 1319 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1320}
1321
/* Coprocessor insn bit 20: set for coprocessor-to-ARM register reads
 * (e.g. TMRC/TMRRC below), clear for writes.
 */
#define ARM_CP_RW_BIT (1 << 20)
/* Load 64-bit iwMMXt data register wREG into VAR.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1328
/* Store VAR into 64-bit iwMMXt data register wREG.  Does not free VAR. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1333
/* Return a new temp holding iwMMXt control register REG.  Caller frees. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1340
/* Store VAR into iwMMXt control register REG.  Consumes (frees) VAR. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1346
/* Moves between the iwMMXt working register M0 and wRn, and bitwise
 * ops combining M0 with wRn (result left in M0).
 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1374
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn).  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
1381
/* As IWMMXT_OP, but the helper also takes cpu_env.  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env helper.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env helper operating on M0 in place.  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1399
/* Instantiate the iwMMXt op generators used by disas_iwmmxt_insn().  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1454
/* Set bit 1 of the wCon control register.  */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1462
/* Set bit 0 of the wCon control register.  */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1470
/* Compute N/Z flags from M0 via helper and store them into wCASF.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1477
/* M0 += zero-extended low 32 bits of wRn.  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1484
/* Compute the guest address for an iwMMXt load/store insn into DEST,
 * handling pre-indexed (bit 24), writeback (bit 21) and up/down (bit 23)
 * addressing forms; the base register is insn bits [19:16].
 * Returns 1 for an invalid (UNDEF) encoding, 0 on success.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 is set */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    /* NOTE(review): in the P=0, W=0, U=1 case we fall through to
     * return 0 without setting DEST or freeing tmp — looks like a
     * temp leak with an uninitialized address; verify against the
     * iwMMXt "unindexed" addressing definition.
     */
    return 0;
}
1519
/* Fetch the shift amount for an iwMMXt shift insn into DEST, masked by
 * MASK.  Bit 8 selects a wCGR control register source (only wCGR0-3 are
 * legal); otherwise the low 32 bits of data register wRn are used.
 * Returns 1 for an invalid encoding, 0 on success.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1541
a1c7273b 1542/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1543 (ie. an undefined instruction). */
0ecb72a5 1544static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1545{
1546 int rd, wrd;
1547 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1548 TCGv_i32 addr;
1549 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1550
1551 if ((insn & 0x0e000e00) == 0x0c000000) {
1552 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1553 wrd = insn & 0xf;
1554 rdlo = (insn >> 12) & 0xf;
1555 rdhi = (insn >> 16) & 0xf;
1556 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1557 iwmmxt_load_reg(cpu_V0, wrd);
1558 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1559 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1560 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1561 } else { /* TMCRR */
da6b5335
FN
1562 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1563 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1564 gen_op_iwmmxt_set_mup();
1565 }
1566 return 0;
1567 }
1568
1569 wrd = (insn >> 12) & 0xf;
7d1b0095 1570 addr = tcg_temp_new_i32();
da6b5335 1571 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1572 tcg_temp_free_i32(addr);
18c9b560 1573 return 1;
da6b5335 1574 }
18c9b560
AZ
1575 if (insn & ARM_CP_RW_BIT) {
1576 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1577 tmp = tcg_temp_new_i32();
6ce2faf4 1578 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1579 iwmmxt_store_creg(wrd, tmp);
18c9b560 1580 } else {
e677137d
PB
1581 i = 1;
1582 if (insn & (1 << 8)) {
1583 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1584 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1585 i = 0;
1586 } else { /* WLDRW wRd */
29531141 1587 tmp = tcg_temp_new_i32();
6ce2faf4 1588 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1589 }
1590 } else {
29531141 1591 tmp = tcg_temp_new_i32();
e677137d 1592 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1593 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1594 } else { /* WLDRB */
6ce2faf4 1595 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1596 }
1597 }
1598 if (i) {
1599 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1600 tcg_temp_free_i32(tmp);
e677137d 1601 }
18c9b560
AZ
1602 gen_op_iwmmxt_movq_wRn_M0(wrd);
1603 }
1604 } else {
1605 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1606 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1607 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1608 } else {
1609 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1610 tmp = tcg_temp_new_i32();
e677137d
PB
1611 if (insn & (1 << 8)) {
1612 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1613 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1614 } else { /* WSTRW wRd */
1615 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1616 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1617 }
1618 } else {
1619 if (insn & (1 << 22)) { /* WSTRH */
1620 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1621 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1622 } else { /* WSTRB */
1623 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1624 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1625 }
1626 }
18c9b560 1627 }
29531141 1628 tcg_temp_free_i32(tmp);
18c9b560 1629 }
7d1b0095 1630 tcg_temp_free_i32(addr);
18c9b560
AZ
1631 return 0;
1632 }
1633
1634 if ((insn & 0x0f000000) != 0x0e000000)
1635 return 1;
1636
1637 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1638 case 0x000: /* WOR */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 0) & 0xf;
1641 rd1 = (insn >> 16) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
1643 gen_op_iwmmxt_orq_M0_wRn(rd1);
1644 gen_op_iwmmxt_setpsr_nz();
1645 gen_op_iwmmxt_movq_wRn_M0(wrd);
1646 gen_op_iwmmxt_set_mup();
1647 gen_op_iwmmxt_set_cup();
1648 break;
1649 case 0x011: /* TMCR */
1650 if (insn & 0xf)
1651 return 1;
1652 rd = (insn >> 12) & 0xf;
1653 wrd = (insn >> 16) & 0xf;
1654 switch (wrd) {
1655 case ARM_IWMMXT_wCID:
1656 case ARM_IWMMXT_wCASF:
1657 break;
1658 case ARM_IWMMXT_wCon:
1659 gen_op_iwmmxt_set_cup();
1660 /* Fall through. */
1661 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1662 tmp = iwmmxt_load_creg(wrd);
1663 tmp2 = load_reg(s, rd);
f669df27 1664 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1665 tcg_temp_free_i32(tmp2);
da6b5335 1666 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1667 break;
1668 case ARM_IWMMXT_wCGR0:
1669 case ARM_IWMMXT_wCGR1:
1670 case ARM_IWMMXT_wCGR2:
1671 case ARM_IWMMXT_wCGR3:
1672 gen_op_iwmmxt_set_cup();
da6b5335
FN
1673 tmp = load_reg(s, rd);
1674 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1675 break;
1676 default:
1677 return 1;
1678 }
1679 break;
1680 case 0x100: /* WXOR */
1681 wrd = (insn >> 12) & 0xf;
1682 rd0 = (insn >> 0) & 0xf;
1683 rd1 = (insn >> 16) & 0xf;
1684 gen_op_iwmmxt_movq_M0_wRn(rd0);
1685 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1686 gen_op_iwmmxt_setpsr_nz();
1687 gen_op_iwmmxt_movq_wRn_M0(wrd);
1688 gen_op_iwmmxt_set_mup();
1689 gen_op_iwmmxt_set_cup();
1690 break;
1691 case 0x111: /* TMRC */
1692 if (insn & 0xf)
1693 return 1;
1694 rd = (insn >> 12) & 0xf;
1695 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1696 tmp = iwmmxt_load_creg(wrd);
1697 store_reg(s, rd, tmp);
18c9b560
AZ
1698 break;
1699 case 0x300: /* WANDN */
1700 wrd = (insn >> 12) & 0xf;
1701 rd0 = (insn >> 0) & 0xf;
1702 rd1 = (insn >> 16) & 0xf;
1703 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1704 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1705 gen_op_iwmmxt_andq_M0_wRn(rd1);
1706 gen_op_iwmmxt_setpsr_nz();
1707 gen_op_iwmmxt_movq_wRn_M0(wrd);
1708 gen_op_iwmmxt_set_mup();
1709 gen_op_iwmmxt_set_cup();
1710 break;
1711 case 0x200: /* WAND */
1712 wrd = (insn >> 12) & 0xf;
1713 rd0 = (insn >> 0) & 0xf;
1714 rd1 = (insn >> 16) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0);
1716 gen_op_iwmmxt_andq_M0_wRn(rd1);
1717 gen_op_iwmmxt_setpsr_nz();
1718 gen_op_iwmmxt_movq_wRn_M0(wrd);
1719 gen_op_iwmmxt_set_mup();
1720 gen_op_iwmmxt_set_cup();
1721 break;
1722 case 0x810: case 0xa10: /* WMADD */
1723 wrd = (insn >> 12) & 0xf;
1724 rd0 = (insn >> 0) & 0xf;
1725 rd1 = (insn >> 16) & 0xf;
1726 gen_op_iwmmxt_movq_M0_wRn(rd0);
1727 if (insn & (1 << 21))
1728 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1729 else
1730 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1731 gen_op_iwmmxt_movq_wRn_M0(wrd);
1732 gen_op_iwmmxt_set_mup();
1733 break;
1734 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1735 wrd = (insn >> 12) & 0xf;
1736 rd0 = (insn >> 16) & 0xf;
1737 rd1 = (insn >> 0) & 0xf;
1738 gen_op_iwmmxt_movq_M0_wRn(rd0);
1739 switch ((insn >> 22) & 3) {
1740 case 0:
1741 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1742 break;
1743 case 1:
1744 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1745 break;
1746 case 2:
1747 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1748 break;
1749 case 3:
1750 return 1;
1751 }
1752 gen_op_iwmmxt_movq_wRn_M0(wrd);
1753 gen_op_iwmmxt_set_mup();
1754 gen_op_iwmmxt_set_cup();
1755 break;
1756 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1757 wrd = (insn >> 12) & 0xf;
1758 rd0 = (insn >> 16) & 0xf;
1759 rd1 = (insn >> 0) & 0xf;
1760 gen_op_iwmmxt_movq_M0_wRn(rd0);
1761 switch ((insn >> 22) & 3) {
1762 case 0:
1763 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1764 break;
1765 case 1:
1766 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1767 break;
1768 case 2:
1769 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1770 break;
1771 case 3:
1772 return 1;
1773 }
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1779 wrd = (insn >> 12) & 0xf;
1780 rd0 = (insn >> 16) & 0xf;
1781 rd1 = (insn >> 0) & 0xf;
1782 gen_op_iwmmxt_movq_M0_wRn(rd0);
1783 if (insn & (1 << 22))
1784 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1785 else
1786 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1787 if (!(insn & (1 << 20)))
1788 gen_op_iwmmxt_addl_M0_wRn(wrd);
1789 gen_op_iwmmxt_movq_wRn_M0(wrd);
1790 gen_op_iwmmxt_set_mup();
1791 break;
1792 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1793 wrd = (insn >> 12) & 0xf;
1794 rd0 = (insn >> 16) & 0xf;
1795 rd1 = (insn >> 0) & 0xf;
1796 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1797 if (insn & (1 << 21)) {
1798 if (insn & (1 << 20))
1799 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1800 else
1801 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1802 } else {
1803 if (insn & (1 << 20))
1804 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1805 else
1806 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1807 }
18c9b560
AZ
1808 gen_op_iwmmxt_movq_wRn_M0(wrd);
1809 gen_op_iwmmxt_set_mup();
1810 break;
1811 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1812 wrd = (insn >> 12) & 0xf;
1813 rd0 = (insn >> 16) & 0xf;
1814 rd1 = (insn >> 0) & 0xf;
1815 gen_op_iwmmxt_movq_M0_wRn(rd0);
1816 if (insn & (1 << 21))
1817 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1818 else
1819 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1820 if (!(insn & (1 << 20))) {
e677137d
PB
1821 iwmmxt_load_reg(cpu_V1, wrd);
1822 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1823 }
1824 gen_op_iwmmxt_movq_wRn_M0(wrd);
1825 gen_op_iwmmxt_set_mup();
1826 break;
1827 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1828 wrd = (insn >> 12) & 0xf;
1829 rd0 = (insn >> 16) & 0xf;
1830 rd1 = (insn >> 0) & 0xf;
1831 gen_op_iwmmxt_movq_M0_wRn(rd0);
1832 switch ((insn >> 22) & 3) {
1833 case 0:
1834 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1835 break;
1836 case 1:
1837 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1838 break;
1839 case 2:
1840 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1841 break;
1842 case 3:
1843 return 1;
1844 }
1845 gen_op_iwmmxt_movq_wRn_M0(wrd);
1846 gen_op_iwmmxt_set_mup();
1847 gen_op_iwmmxt_set_cup();
1848 break;
1849 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1850 wrd = (insn >> 12) & 0xf;
1851 rd0 = (insn >> 16) & 0xf;
1852 rd1 = (insn >> 0) & 0xf;
1853 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1854 if (insn & (1 << 22)) {
1855 if (insn & (1 << 20))
1856 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1857 else
1858 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1859 } else {
1860 if (insn & (1 << 20))
1861 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1862 else
1863 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1864 }
18c9b560
AZ
1865 gen_op_iwmmxt_movq_wRn_M0(wrd);
1866 gen_op_iwmmxt_set_mup();
1867 gen_op_iwmmxt_set_cup();
1868 break;
1869 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1870 wrd = (insn >> 12) & 0xf;
1871 rd0 = (insn >> 16) & 0xf;
1872 rd1 = (insn >> 0) & 0xf;
1873 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1874 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1875 tcg_gen_andi_i32(tmp, tmp, 7);
1876 iwmmxt_load_reg(cpu_V1, rd1);
1877 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1878 tcg_temp_free_i32(tmp);
18c9b560
AZ
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 gen_op_iwmmxt_set_mup();
1881 break;
1882 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1883 if (((insn >> 6) & 3) == 3)
1884 return 1;
18c9b560
AZ
1885 rd = (insn >> 12) & 0xf;
1886 wrd = (insn >> 16) & 0xf;
da6b5335 1887 tmp = load_reg(s, rd);
18c9b560
AZ
1888 gen_op_iwmmxt_movq_M0_wRn(wrd);
1889 switch ((insn >> 6) & 3) {
1890 case 0:
da6b5335
FN
1891 tmp2 = tcg_const_i32(0xff);
1892 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1893 break;
1894 case 1:
da6b5335
FN
1895 tmp2 = tcg_const_i32(0xffff);
1896 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1897 break;
1898 case 2:
da6b5335
FN
1899 tmp2 = tcg_const_i32(0xffffffff);
1900 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1901 break;
da6b5335 1902 default:
39d5492a
PM
1903 TCGV_UNUSED_I32(tmp2);
1904 TCGV_UNUSED_I32(tmp3);
18c9b560 1905 }
da6b5335 1906 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1907 tcg_temp_free_i32(tmp3);
1908 tcg_temp_free_i32(tmp2);
7d1b0095 1909 tcg_temp_free_i32(tmp);
18c9b560
AZ
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 break;
1913 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1914 rd = (insn >> 12) & 0xf;
1915 wrd = (insn >> 16) & 0xf;
da6b5335 1916 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1917 return 1;
1918 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1919 tmp = tcg_temp_new_i32();
18c9b560
AZ
1920 switch ((insn >> 22) & 3) {
1921 case 0:
da6b5335
FN
1922 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1923 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1924 if (insn & 8) {
1925 tcg_gen_ext8s_i32(tmp, tmp);
1926 } else {
1927 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1928 }
1929 break;
1930 case 1:
da6b5335
FN
1931 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1932 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1933 if (insn & 8) {
1934 tcg_gen_ext16s_i32(tmp, tmp);
1935 } else {
1936 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1937 }
1938 break;
1939 case 2:
da6b5335
FN
1940 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1941 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1942 break;
18c9b560 1943 }
da6b5335 1944 store_reg(s, rd, tmp);
18c9b560
AZ
1945 break;
1946 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1947 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1948 return 1;
da6b5335 1949 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1950 switch ((insn >> 22) & 3) {
1951 case 0:
da6b5335 1952 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1953 break;
1954 case 1:
da6b5335 1955 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1956 break;
1957 case 2:
da6b5335 1958 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1959 break;
18c9b560 1960 }
da6b5335
FN
1961 tcg_gen_shli_i32(tmp, tmp, 28);
1962 gen_set_nzcv(tmp);
7d1b0095 1963 tcg_temp_free_i32(tmp);
18c9b560
AZ
1964 break;
1965 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1966 if (((insn >> 6) & 3) == 3)
1967 return 1;
18c9b560
AZ
1968 rd = (insn >> 12) & 0xf;
1969 wrd = (insn >> 16) & 0xf;
da6b5335 1970 tmp = load_reg(s, rd);
18c9b560
AZ
1971 switch ((insn >> 6) & 3) {
1972 case 0:
da6b5335 1973 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1974 break;
1975 case 1:
da6b5335 1976 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1977 break;
1978 case 2:
da6b5335 1979 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1980 break;
18c9b560 1981 }
7d1b0095 1982 tcg_temp_free_i32(tmp);
18c9b560
AZ
1983 gen_op_iwmmxt_movq_wRn_M0(wrd);
1984 gen_op_iwmmxt_set_mup();
1985 break;
1986 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1987 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1988 return 1;
da6b5335 1989 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1990 tmp2 = tcg_temp_new_i32();
da6b5335 1991 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1992 switch ((insn >> 22) & 3) {
1993 case 0:
1994 for (i = 0; i < 7; i ++) {
da6b5335
FN
1995 tcg_gen_shli_i32(tmp2, tmp2, 4);
1996 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1997 }
1998 break;
1999 case 1:
2000 for (i = 0; i < 3; i ++) {
da6b5335
FN
2001 tcg_gen_shli_i32(tmp2, tmp2, 8);
2002 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2003 }
2004 break;
2005 case 2:
da6b5335
FN
2006 tcg_gen_shli_i32(tmp2, tmp2, 16);
2007 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2008 break;
18c9b560 2009 }
da6b5335 2010 gen_set_nzcv(tmp);
7d1b0095
PM
2011 tcg_temp_free_i32(tmp2);
2012 tcg_temp_free_i32(tmp);
18c9b560
AZ
2013 break;
2014 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 gen_op_iwmmxt_movq_M0_wRn(rd0);
2018 switch ((insn >> 22) & 3) {
2019 case 0:
e677137d 2020 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2021 break;
2022 case 1:
e677137d 2023 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2024 break;
2025 case 2:
e677137d 2026 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2027 break;
2028 case 3:
2029 return 1;
2030 }
2031 gen_op_iwmmxt_movq_wRn_M0(wrd);
2032 gen_op_iwmmxt_set_mup();
2033 break;
2034 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2035 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2036 return 1;
da6b5335 2037 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2038 tmp2 = tcg_temp_new_i32();
da6b5335 2039 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2040 switch ((insn >> 22) & 3) {
2041 case 0:
2042 for (i = 0; i < 7; i ++) {
da6b5335
FN
2043 tcg_gen_shli_i32(tmp2, tmp2, 4);
2044 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2045 }
2046 break;
2047 case 1:
2048 for (i = 0; i < 3; i ++) {
da6b5335
FN
2049 tcg_gen_shli_i32(tmp2, tmp2, 8);
2050 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2051 }
2052 break;
2053 case 2:
da6b5335
FN
2054 tcg_gen_shli_i32(tmp2, tmp2, 16);
2055 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2056 break;
18c9b560 2057 }
da6b5335 2058 gen_set_nzcv(tmp);
7d1b0095
PM
2059 tcg_temp_free_i32(tmp2);
2060 tcg_temp_free_i32(tmp);
18c9b560
AZ
2061 break;
2062 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2063 rd = (insn >> 12) & 0xf;
2064 rd0 = (insn >> 16) & 0xf;
da6b5335 2065 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2066 return 1;
2067 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2068 tmp = tcg_temp_new_i32();
18c9b560
AZ
2069 switch ((insn >> 22) & 3) {
2070 case 0:
da6b5335 2071 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2072 break;
2073 case 1:
da6b5335 2074 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2075 break;
2076 case 2:
da6b5335 2077 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2078 break;
18c9b560 2079 }
da6b5335 2080 store_reg(s, rd, tmp);
18c9b560
AZ
2081 break;
2082 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2083 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2084 wrd = (insn >> 12) & 0xf;
2085 rd0 = (insn >> 16) & 0xf;
2086 rd1 = (insn >> 0) & 0xf;
2087 gen_op_iwmmxt_movq_M0_wRn(rd0);
2088 switch ((insn >> 22) & 3) {
2089 case 0:
2090 if (insn & (1 << 21))
2091 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2092 else
2093 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2094 break;
2095 case 1:
2096 if (insn & (1 << 21))
2097 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2098 else
2099 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2100 break;
2101 case 2:
2102 if (insn & (1 << 21))
2103 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2104 else
2105 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2106 break;
2107 case 3:
2108 return 1;
2109 }
2110 gen_op_iwmmxt_movq_wRn_M0(wrd);
2111 gen_op_iwmmxt_set_mup();
2112 gen_op_iwmmxt_set_cup();
2113 break;
2114 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2115 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2116 wrd = (insn >> 12) & 0xf;
2117 rd0 = (insn >> 16) & 0xf;
2118 gen_op_iwmmxt_movq_M0_wRn(rd0);
2119 switch ((insn >> 22) & 3) {
2120 case 0:
2121 if (insn & (1 << 21))
2122 gen_op_iwmmxt_unpacklsb_M0();
2123 else
2124 gen_op_iwmmxt_unpacklub_M0();
2125 break;
2126 case 1:
2127 if (insn & (1 << 21))
2128 gen_op_iwmmxt_unpacklsw_M0();
2129 else
2130 gen_op_iwmmxt_unpackluw_M0();
2131 break;
2132 case 2:
2133 if (insn & (1 << 21))
2134 gen_op_iwmmxt_unpacklsl_M0();
2135 else
2136 gen_op_iwmmxt_unpacklul_M0();
2137 break;
2138 case 3:
2139 return 1;
2140 }
2141 gen_op_iwmmxt_movq_wRn_M0(wrd);
2142 gen_op_iwmmxt_set_mup();
2143 gen_op_iwmmxt_set_cup();
2144 break;
2145 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2146 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
2150 switch ((insn >> 22) & 3) {
2151 case 0:
2152 if (insn & (1 << 21))
2153 gen_op_iwmmxt_unpackhsb_M0();
2154 else
2155 gen_op_iwmmxt_unpackhub_M0();
2156 break;
2157 case 1:
2158 if (insn & (1 << 21))
2159 gen_op_iwmmxt_unpackhsw_M0();
2160 else
2161 gen_op_iwmmxt_unpackhuw_M0();
2162 break;
2163 case 2:
2164 if (insn & (1 << 21))
2165 gen_op_iwmmxt_unpackhsl_M0();
2166 else
2167 gen_op_iwmmxt_unpackhul_M0();
2168 break;
2169 case 3:
2170 return 1;
2171 }
2172 gen_op_iwmmxt_movq_wRn_M0(wrd);
2173 gen_op_iwmmxt_set_mup();
2174 gen_op_iwmmxt_set_cup();
2175 break;
2176 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2177 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2178 if (((insn >> 22) & 3) == 0)
2179 return 1;
18c9b560
AZ
2180 wrd = (insn >> 12) & 0xf;
2181 rd0 = (insn >> 16) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2183 tmp = tcg_temp_new_i32();
da6b5335 2184 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2185 tcg_temp_free_i32(tmp);
18c9b560 2186 return 1;
da6b5335 2187 }
18c9b560 2188 switch ((insn >> 22) & 3) {
18c9b560 2189 case 1:
477955bd 2190 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2191 break;
2192 case 2:
477955bd 2193 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2194 break;
2195 case 3:
477955bd 2196 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2197 break;
2198 }
7d1b0095 2199 tcg_temp_free_i32(tmp);
18c9b560
AZ
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2205 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2206 if (((insn >> 22) & 3) == 0)
2207 return 1;
18c9b560
AZ
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2211 tmp = tcg_temp_new_i32();
da6b5335 2212 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2213 tcg_temp_free_i32(tmp);
18c9b560 2214 return 1;
da6b5335 2215 }
18c9b560 2216 switch ((insn >> 22) & 3) {
18c9b560 2217 case 1:
477955bd 2218 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2219 break;
2220 case 2:
477955bd 2221 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2222 break;
2223 case 3:
477955bd 2224 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2225 break;
2226 }
7d1b0095 2227 tcg_temp_free_i32(tmp);
18c9b560
AZ
2228 gen_op_iwmmxt_movq_wRn_M0(wrd);
2229 gen_op_iwmmxt_set_mup();
2230 gen_op_iwmmxt_set_cup();
2231 break;
2232 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2233 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2234 if (((insn >> 22) & 3) == 0)
2235 return 1;
18c9b560
AZ
2236 wrd = (insn >> 12) & 0xf;
2237 rd0 = (insn >> 16) & 0xf;
2238 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2239 tmp = tcg_temp_new_i32();
da6b5335 2240 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2241 tcg_temp_free_i32(tmp);
18c9b560 2242 return 1;
da6b5335 2243 }
18c9b560 2244 switch ((insn >> 22) & 3) {
18c9b560 2245 case 1:
477955bd 2246 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2247 break;
2248 case 2:
477955bd 2249 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2250 break;
2251 case 3:
477955bd 2252 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2253 break;
2254 }
7d1b0095 2255 tcg_temp_free_i32(tmp);
18c9b560
AZ
2256 gen_op_iwmmxt_movq_wRn_M0(wrd);
2257 gen_op_iwmmxt_set_mup();
2258 gen_op_iwmmxt_set_cup();
2259 break;
2260 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2261 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2262 if (((insn >> 22) & 3) == 0)
2263 return 1;
18c9b560
AZ
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2267 tmp = tcg_temp_new_i32();
18c9b560 2268 switch ((insn >> 22) & 3) {
18c9b560 2269 case 1:
da6b5335 2270 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2271 tcg_temp_free_i32(tmp);
18c9b560 2272 return 1;
da6b5335 2273 }
477955bd 2274 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2275 break;
2276 case 2:
da6b5335 2277 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2278 tcg_temp_free_i32(tmp);
18c9b560 2279 return 1;
da6b5335 2280 }
477955bd 2281 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2282 break;
2283 case 3:
da6b5335 2284 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2285 tcg_temp_free_i32(tmp);
18c9b560 2286 return 1;
da6b5335 2287 }
477955bd 2288 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2289 break;
2290 }
7d1b0095 2291 tcg_temp_free_i32(tmp);
18c9b560
AZ
2292 gen_op_iwmmxt_movq_wRn_M0(wrd);
2293 gen_op_iwmmxt_set_mup();
2294 gen_op_iwmmxt_set_cup();
2295 break;
2296 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2297 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2298 wrd = (insn >> 12) & 0xf;
2299 rd0 = (insn >> 16) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 gen_op_iwmmxt_movq_M0_wRn(rd0);
2302 switch ((insn >> 22) & 3) {
2303 case 0:
2304 if (insn & (1 << 21))
2305 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2306 else
2307 gen_op_iwmmxt_minub_M0_wRn(rd1);
2308 break;
2309 case 1:
2310 if (insn & (1 << 21))
2311 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2312 else
2313 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2314 break;
2315 case 2:
2316 if (insn & (1 << 21))
2317 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2318 else
2319 gen_op_iwmmxt_minul_M0_wRn(rd1);
2320 break;
2321 case 3:
2322 return 1;
2323 }
2324 gen_op_iwmmxt_movq_wRn_M0(wrd);
2325 gen_op_iwmmxt_set_mup();
2326 break;
2327 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2328 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2329 wrd = (insn >> 12) & 0xf;
2330 rd0 = (insn >> 16) & 0xf;
2331 rd1 = (insn >> 0) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
2333 switch ((insn >> 22) & 3) {
2334 case 0:
2335 if (insn & (1 << 21))
2336 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2337 else
2338 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2339 break;
2340 case 1:
2341 if (insn & (1 << 21))
2342 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2343 else
2344 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2345 break;
2346 case 2:
2347 if (insn & (1 << 21))
2348 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2349 else
2350 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2351 break;
2352 case 3:
2353 return 1;
2354 }
2355 gen_op_iwmmxt_movq_wRn_M0(wrd);
2356 gen_op_iwmmxt_set_mup();
2357 break;
2358 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2359 case 0x402: case 0x502: case 0x602: case 0x702:
2360 wrd = (insn >> 12) & 0xf;
2361 rd0 = (insn >> 16) & 0xf;
2362 rd1 = (insn >> 0) & 0xf;
2363 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2364 tmp = tcg_const_i32((insn >> 20) & 3);
2365 iwmmxt_load_reg(cpu_V1, rd1);
2366 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2367 tcg_temp_free_i32(tmp);
18c9b560
AZ
2368 gen_op_iwmmxt_movq_wRn_M0(wrd);
2369 gen_op_iwmmxt_set_mup();
2370 break;
2371 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2372 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2373 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2374 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2375 wrd = (insn >> 12) & 0xf;
2376 rd0 = (insn >> 16) & 0xf;
2377 rd1 = (insn >> 0) & 0xf;
2378 gen_op_iwmmxt_movq_M0_wRn(rd0);
2379 switch ((insn >> 20) & 0xf) {
2380 case 0x0:
2381 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2382 break;
2383 case 0x1:
2384 gen_op_iwmmxt_subub_M0_wRn(rd1);
2385 break;
2386 case 0x3:
2387 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2388 break;
2389 case 0x4:
2390 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2391 break;
2392 case 0x5:
2393 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2394 break;
2395 case 0x7:
2396 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2397 break;
2398 case 0x8:
2399 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2400 break;
2401 case 0x9:
2402 gen_op_iwmmxt_subul_M0_wRn(rd1);
2403 break;
2404 case 0xb:
2405 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2406 break;
2407 default:
2408 return 1;
2409 }
2410 gen_op_iwmmxt_movq_wRn_M0(wrd);
2411 gen_op_iwmmxt_set_mup();
2412 gen_op_iwmmxt_set_cup();
2413 break;
2414 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2415 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2416 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2417 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2418 wrd = (insn >> 12) & 0xf;
2419 rd0 = (insn >> 16) & 0xf;
2420 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2421 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2422 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2423 tcg_temp_free_i32(tmp);
18c9b560
AZ
2424 gen_op_iwmmxt_movq_wRn_M0(wrd);
2425 gen_op_iwmmxt_set_mup();
2426 gen_op_iwmmxt_set_cup();
2427 break;
2428 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2429 case 0x418: case 0x518: case 0x618: case 0x718:
2430 case 0x818: case 0x918: case 0xa18: case 0xb18:
2431 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2432 wrd = (insn >> 12) & 0xf;
2433 rd0 = (insn >> 16) & 0xf;
2434 rd1 = (insn >> 0) & 0xf;
2435 gen_op_iwmmxt_movq_M0_wRn(rd0);
2436 switch ((insn >> 20) & 0xf) {
2437 case 0x0:
2438 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2439 break;
2440 case 0x1:
2441 gen_op_iwmmxt_addub_M0_wRn(rd1);
2442 break;
2443 case 0x3:
2444 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2445 break;
2446 case 0x4:
2447 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2448 break;
2449 case 0x5:
2450 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2451 break;
2452 case 0x7:
2453 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2454 break;
2455 case 0x8:
2456 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2457 break;
2458 case 0x9:
2459 gen_op_iwmmxt_addul_M0_wRn(rd1);
2460 break;
2461 case 0xb:
2462 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2463 break;
2464 default:
2465 return 1;
2466 }
2467 gen_op_iwmmxt_movq_wRn_M0(wrd);
2468 gen_op_iwmmxt_set_mup();
2469 gen_op_iwmmxt_set_cup();
2470 break;
2471 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2472 case 0x408: case 0x508: case 0x608: case 0x708:
2473 case 0x808: case 0x908: case 0xa08: case 0xb08:
2474 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2475 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2476 return 1;
18c9b560
AZ
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 rd1 = (insn >> 0) & 0xf;
2480 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2481 switch ((insn >> 22) & 3) {
18c9b560
AZ
2482 case 1:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2485 else
2486 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2487 break;
2488 case 2:
2489 if (insn & (1 << 21))
2490 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2491 else
2492 gen_op_iwmmxt_packul_M0_wRn(rd1);
2493 break;
2494 case 3:
2495 if (insn & (1 << 21))
2496 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2497 else
2498 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2499 break;
2500 }
2501 gen_op_iwmmxt_movq_wRn_M0(wrd);
2502 gen_op_iwmmxt_set_mup();
2503 gen_op_iwmmxt_set_cup();
2504 break;
2505 case 0x201: case 0x203: case 0x205: case 0x207:
2506 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2507 case 0x211: case 0x213: case 0x215: case 0x217:
2508 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2509 wrd = (insn >> 5) & 0xf;
2510 rd0 = (insn >> 12) & 0xf;
2511 rd1 = (insn >> 0) & 0xf;
2512 if (rd0 == 0xf || rd1 == 0xf)
2513 return 1;
2514 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2515 tmp = load_reg(s, rd0);
2516 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2517 switch ((insn >> 16) & 0xf) {
2518 case 0x0: /* TMIA */
da6b5335 2519 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2520 break;
2521 case 0x8: /* TMIAPH */
da6b5335 2522 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2523 break;
2524 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2525 if (insn & (1 << 16))
da6b5335 2526 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2527 if (insn & (1 << 17))
da6b5335
FN
2528 tcg_gen_shri_i32(tmp2, tmp2, 16);
2529 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2530 break;
2531 default:
7d1b0095
PM
2532 tcg_temp_free_i32(tmp2);
2533 tcg_temp_free_i32(tmp);
18c9b560
AZ
2534 return 1;
2535 }
7d1b0095
PM
2536 tcg_temp_free_i32(tmp2);
2537 tcg_temp_free_i32(tmp);
18c9b560
AZ
2538 gen_op_iwmmxt_movq_wRn_M0(wrd);
2539 gen_op_iwmmxt_set_mup();
2540 break;
2541 default:
2542 return 1;
2543 }
2544
2545 return 0;
2546}
2547
a1c7273b 2548/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2549 (ie. an undefined instruction). */
0ecb72a5 2550static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2551{
2552 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2553 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2554
2555 if ((insn & 0x0ff00f10) == 0x0e200010) {
2556 /* Multiply with Internal Accumulate Format */
2557 rd0 = (insn >> 12) & 0xf;
2558 rd1 = insn & 0xf;
2559 acc = (insn >> 5) & 7;
2560
2561 if (acc != 0)
2562 return 1;
2563
3a554c0f
FN
2564 tmp = load_reg(s, rd0);
2565 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2566 switch ((insn >> 16) & 0xf) {
2567 case 0x0: /* MIA */
3a554c0f 2568 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2569 break;
2570 case 0x8: /* MIAPH */
3a554c0f 2571 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2572 break;
2573 case 0xc: /* MIABB */
2574 case 0xd: /* MIABT */
2575 case 0xe: /* MIATB */
2576 case 0xf: /* MIATT */
18c9b560 2577 if (insn & (1 << 16))
3a554c0f 2578 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2579 if (insn & (1 << 17))
3a554c0f
FN
2580 tcg_gen_shri_i32(tmp2, tmp2, 16);
2581 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2582 break;
2583 default:
2584 return 1;
2585 }
7d1b0095
PM
2586 tcg_temp_free_i32(tmp2);
2587 tcg_temp_free_i32(tmp);
18c9b560
AZ
2588
2589 gen_op_iwmmxt_movq_wRn_M0(acc);
2590 return 0;
2591 }
2592
2593 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2594 /* Internal Accumulator Access Format */
2595 rdhi = (insn >> 16) & 0xf;
2596 rdlo = (insn >> 12) & 0xf;
2597 acc = insn & 7;
2598
2599 if (acc != 0)
2600 return 1;
2601
2602 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2603 iwmmxt_load_reg(cpu_V0, acc);
2604 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2605 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2606 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2607 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2608 } else { /* MAR */
3a554c0f
FN
2609 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2610 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2611 }
2612 return 0;
2613 }
2614
2615 return 1;
2616}
2617
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
 * A single-precision register number is a 4-bit field (at @bigbit) with
 * one extra low bit (at @smallbit).  VFP_REG_SHR supports a negative
 * shift count by shifting left instead.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: on VFP3 the bit at @smallbit supplies
 * the top (5th) bit of the register number; on pre-VFP3 cores a set
 * @smallbit is an invalid encoding, so the enclosing decoder returns 1
 * (UNDEF).  Note this macro can `return` from the calling function.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operand registers.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2637
4373f3ce 2638/* Move between integer and VFP cores. */
39d5492a 2639static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2640{
39d5492a 2641 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2642 tcg_gen_mov_i32(tmp, cpu_F0s);
2643 return tmp;
2644}
2645
39d5492a 2646static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2647{
2648 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2649 tcg_temp_free_i32(tmp);
4373f3ce
PB
2650}
2651
39d5492a 2652static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2653{
39d5492a 2654 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2655 if (shift)
2656 tcg_gen_shri_i32(var, var, shift);
86831435 2657 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2658 tcg_gen_shli_i32(tmp, var, 8);
2659 tcg_gen_or_i32(var, var, tmp);
2660 tcg_gen_shli_i32(tmp, var, 16);
2661 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2662 tcg_temp_free_i32(tmp);
ad69471c
PB
2663}
2664
39d5492a 2665static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2666{
39d5492a 2667 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2668 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2669 tcg_gen_shli_i32(tmp, var, 16);
2670 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2671 tcg_temp_free_i32(tmp);
ad69471c
PB
2672}
2673
39d5492a 2674static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2675{
39d5492a 2676 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2677 tcg_gen_andi_i32(var, var, 0xffff0000);
2678 tcg_gen_shri_i32(tmp, var, 16);
2679 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2680 tcg_temp_free_i32(tmp);
ad69471c
PB
2681}
2682
39d5492a 2683static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2684{
2685 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2686 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2687 switch (size) {
2688 case 0:
6ce2faf4 2689 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2690 gen_neon_dup_u8(tmp, 0);
2691 break;
2692 case 1:
6ce2faf4 2693 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2694 gen_neon_dup_low16(tmp);
2695 break;
2696 case 2:
6ce2faf4 2697 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2698 break;
2699 default: /* Avoid compiler warnings. */
2700 abort();
2701 }
2702 return tmp;
2703}
2704
/* Generate code for a VSEL instruction: write either rn or rm to rd
 * depending on the condition encoded in bits [21:20] of @insn
 * (00=EQ, 01=VS, 10=GE, 11=GT).  @dp selects double precision.
 * Always returns 0 (success).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values to 64 bits so they can be used as
         * movcond_i64 operands.  NF and VF are sign-extended because the
         * conditions below test their sign (GE/LT against zero); ZF only
         * needs zero-extension since it is compared for (in)equality
         * with zero.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            /* cpu_ZF == 0 means the Z flag is set.  */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained selects: pick frn if Z is clear, then replace
             * the result with frm again unless N == V also holds.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        /* Same condition logic as the dp case, but the 32-bit flag
         * variables can be used directly.
         */
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2813
40cfacdd
WN
2814static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2815 uint32_t rm, uint32_t dp)
2816{
2817 uint32_t vmin = extract32(insn, 6, 1);
2818 TCGv_ptr fpst = get_fpstatus_ptr(0);
2819
2820 if (dp) {
2821 TCGv_i64 frn, frm, dest;
2822
2823 frn = tcg_temp_new_i64();
2824 frm = tcg_temp_new_i64();
2825 dest = tcg_temp_new_i64();
2826
2827 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2828 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2829 if (vmin) {
f71a2ae5 2830 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2831 } else {
f71a2ae5 2832 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2833 }
2834 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2835 tcg_temp_free_i64(frn);
2836 tcg_temp_free_i64(frm);
2837 tcg_temp_free_i64(dest);
2838 } else {
2839 TCGv_i32 frn, frm, dest;
2840
2841 frn = tcg_temp_new_i32();
2842 frm = tcg_temp_new_i32();
2843 dest = tcg_temp_new_i32();
2844
2845 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2846 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2847 if (vmin) {
f71a2ae5 2848 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2849 } else {
f71a2ae5 2850 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2851 }
2852 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2853 tcg_temp_free_i32(frn);
2854 tcg_temp_free_i32(frm);
2855 tcg_temp_free_i32(dest);
2856 }
2857
2858 tcg_temp_free_ptr(fpst);
2859 return 0;
2860}
2861
/* Generate code for VRINTA/VRINTN/VRINTP/VRINTM: round the FP value in
 * rm to an integral FP value in rd, using the explicit @rounding mode
 * rather than the one in the FPSCR.  @dp selects double precision.
 * Always returns 0 (success).
 */
static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                        int rounding)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode;

    /* Temporarily switch the FP status to the requested rounding mode;
     * set_rmode leaves the previous mode in tcg_rmode so the second
     * call below restores it.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
        gen_helper_rints(tcg_res, tcg_op, fpst);
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    /* Restore the original rounding mode.  */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return 0;
}
2899
/* Generate code for VCVTA/VCVTN/VCVTP/VCVTM: convert the FP value in rm
 * to a 32-bit integer in rd using the explicit @rounding mode (not the
 * FPSCR one).  Bit 7 of @insn selects a signed conversion; @dp selects
 * a double-precision source.  Always returns 0 (success).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fixed-point shift: a plain FP-to-integer conversion.  */
    tcg_shift = tcg_const_i32(0);

    /* Temporarily switch the FP status to the requested rounding mode;
     * the second set_rmode call below restores the previous one.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The conversion result is a 32-bit value; narrow it for the
         * single-precision destination register.
         */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original rounding mode.  */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2957
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY, /* RM field 0b00: to nearest, ties away from zero */
    FPROUNDING_TIEEVEN, /* RM field 0b01: to nearest, ties to even */
    FPROUNDING_POSINF,  /* RM field 0b10: towards +infinity */
    FPROUNDING_NEGINF,  /* RM field 0b11: towards -infinity */
};
2968
/* Disassemble the VFP instructions new in ARMv8 which use the T=1/
 * unconditional encoding space: VSEL, VMAXNM/VMINNM, VRINT{A,N,P,M}
 * and VCVT{A,N,P,M}.  Returns nonzero (UNDEF) if the CPU is not v8 or
 * the insn does not match any of these encodings.
 */
static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        return 1;
    }

    /* Decode the three operand register numbers up front; the VFP_DREG
     * macros may return 1 (UNDEF) themselves on pre-VFP3 cores.
     */
    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3002
a1c7273b 3003/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3004 (ie. an undefined instruction). */
0ecb72a5 3005static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
3006{
3007 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3008 int dp, veclen;
39d5492a
PM
3009 TCGv_i32 addr;
3010 TCGv_i32 tmp;
3011 TCGv_i32 tmp2;
b7bcbe95 3012
40f137e1
PB
3013 if (!arm_feature(env, ARM_FEATURE_VFP))
3014 return 1;
3015
2c7ffc41
PM
3016 /* FIXME: this access check should not take precedence over UNDEF
3017 * for invalid encodings; we will generate incorrect syndrome information
3018 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3019 */
3020 if (!s->cpacr_fpen) {
3021 gen_exception_insn(s, 4, EXCP_UDEF,
3022 syn_fp_access_trap(1, 0xe, s->thumb));
3023 return 0;
3024 }
3025
5df8bac1 3026 if (!s->vfp_enabled) {
9ee6e8bb 3027 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3028 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3029 return 1;
3030 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3031 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3032 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3033 return 1;
a50c0f51 3034 }
40f137e1 3035 }
6a57f3eb
WN
3036
3037 if (extract32(insn, 28, 4) == 0xf) {
3038 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3039 * only used in v8 and above.
3040 */
04731fb5 3041 return disas_vfp_v8_insn(env, s, insn);
6a57f3eb
WN
3042 }
3043
b7bcbe95
FB
3044 dp = ((insn & 0xf00) == 0xb00);
3045 switch ((insn >> 24) & 0xf) {
3046 case 0xe:
3047 if (insn & (1 << 4)) {
3048 /* single register transfer */
b7bcbe95
FB
3049 rd = (insn >> 12) & 0xf;
3050 if (dp) {
9ee6e8bb
PB
3051 int size;
3052 int pass;
3053
3054 VFP_DREG_N(rn, insn);
3055 if (insn & 0xf)
b7bcbe95 3056 return 1;
9ee6e8bb
PB
3057 if (insn & 0x00c00060
3058 && !arm_feature(env, ARM_FEATURE_NEON))
3059 return 1;
3060
3061 pass = (insn >> 21) & 1;
3062 if (insn & (1 << 22)) {
3063 size = 0;
3064 offset = ((insn >> 5) & 3) * 8;
3065 } else if (insn & (1 << 5)) {
3066 size = 1;
3067 offset = (insn & (1 << 6)) ? 16 : 0;
3068 } else {
3069 size = 2;
3070 offset = 0;
3071 }
18c9b560 3072 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3073 /* vfp->arm */
ad69471c 3074 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3075 switch (size) {
3076 case 0:
9ee6e8bb 3077 if (offset)
ad69471c 3078 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3079 if (insn & (1 << 23))
ad69471c 3080 gen_uxtb(tmp);
9ee6e8bb 3081 else
ad69471c 3082 gen_sxtb(tmp);
9ee6e8bb
PB
3083 break;
3084 case 1:
9ee6e8bb
PB
3085 if (insn & (1 << 23)) {
3086 if (offset) {
ad69471c 3087 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3088 } else {
ad69471c 3089 gen_uxth(tmp);
9ee6e8bb
PB
3090 }
3091 } else {
3092 if (offset) {
ad69471c 3093 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3094 } else {
ad69471c 3095 gen_sxth(tmp);
9ee6e8bb
PB
3096 }
3097 }
3098 break;
3099 case 2:
9ee6e8bb
PB
3100 break;
3101 }
ad69471c 3102 store_reg(s, rd, tmp);
b7bcbe95
FB
3103 } else {
3104 /* arm->vfp */
ad69471c 3105 tmp = load_reg(s, rd);
9ee6e8bb
PB
3106 if (insn & (1 << 23)) {
3107 /* VDUP */
3108 if (size == 0) {
ad69471c 3109 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3110 } else if (size == 1) {
ad69471c 3111 gen_neon_dup_low16(tmp);
9ee6e8bb 3112 }
cbbccffc 3113 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3114 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3115 tcg_gen_mov_i32(tmp2, tmp);
3116 neon_store_reg(rn, n, tmp2);
3117 }
3118 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3119 } else {
3120 /* VMOV */
3121 switch (size) {
3122 case 0:
ad69471c 3123 tmp2 = neon_load_reg(rn, pass);
d593c48e 3124 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3125 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3126 break;
3127 case 1:
ad69471c 3128 tmp2 = neon_load_reg(rn, pass);
d593c48e 3129 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3130 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3131 break;
3132 case 2:
9ee6e8bb
PB
3133 break;
3134 }
ad69471c 3135 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3136 }
b7bcbe95 3137 }
9ee6e8bb
PB
3138 } else { /* !dp */
3139 if ((insn & 0x6f) != 0x00)
3140 return 1;
3141 rn = VFP_SREG_N(insn);
18c9b560 3142 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3143 /* vfp->arm */
3144 if (insn & (1 << 21)) {
3145 /* system register */
40f137e1 3146 rn >>= 1;
9ee6e8bb 3147
b7bcbe95 3148 switch (rn) {
40f137e1 3149 case ARM_VFP_FPSID:
4373f3ce 3150 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3151 VFP3 restricts all id registers to privileged
3152 accesses. */
3153 if (IS_USER(s)
3154 && arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
4373f3ce 3156 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3157 break;
40f137e1 3158 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3159 if (IS_USER(s))
3160 return 1;
4373f3ce 3161 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3162 break;
40f137e1
PB
3163 case ARM_VFP_FPINST:
3164 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3165 /* Not present in VFP3. */
3166 if (IS_USER(s)
3167 || arm_feature(env, ARM_FEATURE_VFP3))
3168 return 1;
4373f3ce 3169 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3170 break;
40f137e1 3171 case ARM_VFP_FPSCR:
601d70b9 3172 if (rd == 15) {
4373f3ce
PB
3173 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3174 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3175 } else {
7d1b0095 3176 tmp = tcg_temp_new_i32();
4373f3ce
PB
3177 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3178 }
b7bcbe95 3179 break;
a50c0f51
PM
3180 case ARM_VFP_MVFR2:
3181 if (!arm_feature(env, ARM_FEATURE_V8)) {
3182 return 1;
3183 }
3184 /* fall through */
9ee6e8bb
PB
3185 case ARM_VFP_MVFR0:
3186 case ARM_VFP_MVFR1:
3187 if (IS_USER(s)
06ed5d66 3188 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 3189 return 1;
4373f3ce 3190 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3191 break;
b7bcbe95
FB
3192 default:
3193 return 1;
3194 }
3195 } else {
3196 gen_mov_F0_vreg(0, rn);
4373f3ce 3197 tmp = gen_vfp_mrs();
b7bcbe95
FB
3198 }
3199 if (rd == 15) {
b5ff1b31 3200 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3201 gen_set_nzcv(tmp);
7d1b0095 3202 tcg_temp_free_i32(tmp);
4373f3ce
PB
3203 } else {
3204 store_reg(s, rd, tmp);
3205 }
b7bcbe95
FB
3206 } else {
3207 /* arm->vfp */
b7bcbe95 3208 if (insn & (1 << 21)) {
40f137e1 3209 rn >>= 1;
b7bcbe95
FB
3210 /* system register */
3211 switch (rn) {
40f137e1 3212 case ARM_VFP_FPSID:
9ee6e8bb
PB
3213 case ARM_VFP_MVFR0:
3214 case ARM_VFP_MVFR1:
b7bcbe95
FB
3215 /* Writes are ignored. */
3216 break;
40f137e1 3217 case ARM_VFP_FPSCR:
e4c1cfa5 3218 tmp = load_reg(s, rd);
4373f3ce 3219 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3220 tcg_temp_free_i32(tmp);
b5ff1b31 3221 gen_lookup_tb(s);
b7bcbe95 3222 break;
40f137e1 3223 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3224 if (IS_USER(s))
3225 return 1;
71b3c3de
JR
3226 /* TODO: VFP subarchitecture support.
3227 * For now, keep the EN bit only */
e4c1cfa5 3228 tmp = load_reg(s, rd);
71b3c3de 3229 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3230 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3231 gen_lookup_tb(s);
3232 break;
3233 case ARM_VFP_FPINST:
3234 case ARM_VFP_FPINST2:
e4c1cfa5 3235 tmp = load_reg(s, rd);
4373f3ce 3236 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3237 break;
b7bcbe95
FB
3238 default:
3239 return 1;
3240 }
3241 } else {
e4c1cfa5 3242 tmp = load_reg(s, rd);
4373f3ce 3243 gen_vfp_msr(tmp);
b7bcbe95
FB
3244 gen_mov_vreg_F0(0, rn);
3245 }
3246 }
3247 }
3248 } else {
3249 /* data processing */
3250 /* The opcode is in bits 23, 21, 20 and 6. */
3251 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3252 if (dp) {
3253 if (op == 15) {
3254 /* rn is opcode */
3255 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3256 } else {
3257 /* rn is register number */
9ee6e8bb 3258 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3259 }
3260
239c20c7
WN
3261 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3262 ((rn & 0x1e) == 0x6))) {
3263 /* Integer or single/half precision destination. */
9ee6e8bb 3264 rd = VFP_SREG_D(insn);
b7bcbe95 3265 } else {
9ee6e8bb 3266 VFP_DREG_D(rd, insn);
b7bcbe95 3267 }
04595bf6 3268 if (op == 15 &&
239c20c7
WN
3269 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3270 ((rn & 0x1e) == 0x4))) {
3271 /* VCVT from int or half precision is always from S reg
3272 * regardless of dp bit. VCVT with immediate frac_bits
3273 * has same format as SREG_M.
04595bf6
PM
3274 */
3275 rm = VFP_SREG_M(insn);
b7bcbe95 3276 } else {
9ee6e8bb 3277 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3278 }
3279 } else {
9ee6e8bb 3280 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3281 if (op == 15 && rn == 15) {
3282 /* Double precision destination. */
9ee6e8bb
PB
3283 VFP_DREG_D(rd, insn);
3284 } else {
3285 rd = VFP_SREG_D(insn);
3286 }
04595bf6
PM
3287 /* NB that we implicitly rely on the encoding for the frac_bits
3288 * in VCVT of fixed to float being the same as that of an SREG_M
3289 */
9ee6e8bb 3290 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3291 }
3292
69d1fc22 3293 veclen = s->vec_len;
b7bcbe95
FB
3294 if (op == 15 && rn > 3)
3295 veclen = 0;
3296
3297 /* Shut up compiler warnings. */
3298 delta_m = 0;
3299 delta_d = 0;
3300 bank_mask = 0;
3b46e624 3301
b7bcbe95
FB
3302 if (veclen > 0) {
3303 if (dp)
3304 bank_mask = 0xc;
3305 else
3306 bank_mask = 0x18;
3307
3308 /* Figure out what type of vector operation this is. */
3309 if ((rd & bank_mask) == 0) {
3310 /* scalar */
3311 veclen = 0;
3312 } else {
3313 if (dp)
69d1fc22 3314 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3315 else
69d1fc22 3316 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3317
3318 if ((rm & bank_mask) == 0) {
3319 /* mixed scalar/vector */
3320 delta_m = 0;
3321 } else {
3322 /* vector */
3323 delta_m = delta_d;
3324 }
3325 }
3326 }
3327
3328 /* Load the initial operands. */
3329 if (op == 15) {
3330 switch (rn) {
3331 case 16:
3332 case 17:
3333 /* Integer source */
3334 gen_mov_F0_vreg(0, rm);
3335 break;
3336 case 8:
3337 case 9:
3338 /* Compare */
3339 gen_mov_F0_vreg(dp, rd);
3340 gen_mov_F1_vreg(dp, rm);
3341 break;
3342 case 10:
3343 case 11:
3344 /* Compare with zero */
3345 gen_mov_F0_vreg(dp, rd);
3346 gen_vfp_F1_ld0(dp);
3347 break;
9ee6e8bb
PB
3348 case 20:
3349 case 21:
3350 case 22:
3351 case 23:
644ad806
PB
3352 case 28:
3353 case 29:
3354 case 30:
3355 case 31:
9ee6e8bb
PB
3356 /* Source and destination the same. */
3357 gen_mov_F0_vreg(dp, rd);
3358 break;
6e0c0ed1
PM
3359 case 4:
3360 case 5:
3361 case 6:
3362 case 7:
239c20c7
WN
3363 /* VCVTB, VCVTT: only present with the halfprec extension
3364 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3365 * (we choose to UNDEF)
6e0c0ed1 3366 */
239c20c7
WN
3367 if ((dp && !arm_feature(env, ARM_FEATURE_V8)) ||
3368 !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3369 return 1;
3370 }
239c20c7
WN
3371 if (!extract32(rn, 1, 1)) {
3372 /* Half precision source. */
3373 gen_mov_F0_vreg(0, rm);
3374 break;
3375 }
6e0c0ed1 3376 /* Otherwise fall through */
b7bcbe95
FB
3377 default:
3378 /* One source operand. */
3379 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3380 break;
b7bcbe95
FB
3381 }
3382 } else {
3383 /* Two source operands. */
3384 gen_mov_F0_vreg(dp, rn);
3385 gen_mov_F1_vreg(dp, rm);
3386 }
3387
3388 for (;;) {
3389 /* Perform the calculation. */
3390 switch (op) {
605a6aed
PM
3391 case 0: /* VMLA: fd + (fn * fm) */
3392 /* Note that order of inputs to the add matters for NaNs */
3393 gen_vfp_F1_mul(dp);
3394 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3395 gen_vfp_add(dp);
3396 break;
605a6aed 3397 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3398 gen_vfp_mul(dp);
605a6aed
PM
3399 gen_vfp_F1_neg(dp);
3400 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3401 gen_vfp_add(dp);
3402 break;
605a6aed
PM
3403 case 2: /* VNMLS: -fd + (fn * fm) */
3404 /* Note that it isn't valid to replace (-A + B) with (B - A)
3405 * or similar plausible looking simplifications
3406 * because this will give wrong results for NaNs.
3407 */
3408 gen_vfp_F1_mul(dp);
3409 gen_mov_F0_vreg(dp, rd);
3410 gen_vfp_neg(dp);
3411 gen_vfp_add(dp);
b7bcbe95 3412 break;
605a6aed 3413 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3414 gen_vfp_mul(dp);
605a6aed
PM
3415 gen_vfp_F1_neg(dp);
3416 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3417 gen_vfp_neg(dp);
605a6aed 3418 gen_vfp_add(dp);
b7bcbe95
FB
3419 break;
3420 case 4: /* mul: fn * fm */
3421 gen_vfp_mul(dp);
3422 break;
3423 case 5: /* nmul: -(fn * fm) */
3424 gen_vfp_mul(dp);
3425 gen_vfp_neg(dp);
3426 break;
3427 case 6: /* add: fn + fm */
3428 gen_vfp_add(dp);
3429 break;
3430 case 7: /* sub: fn - fm */
3431 gen_vfp_sub(dp);
3432 break;
3433 case 8: /* div: fn / fm */
3434 gen_vfp_div(dp);
3435 break;
da97f52c
PM
3436 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3437 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3438 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3439 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3440 /* These are fused multiply-add, and must be done as one
3441 * floating point operation with no rounding between the
3442 * multiplication and addition steps.
3443 * NB that doing the negations here as separate steps is
3444 * correct : an input NaN should come out with its sign bit
3445 * flipped if it is a negated-input.
3446 */
3447 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3448 return 1;
3449 }
3450 if (dp) {
3451 TCGv_ptr fpst;
3452 TCGv_i64 frd;
3453 if (op & 1) {
3454 /* VFNMS, VFMS */
3455 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3456 }
3457 frd = tcg_temp_new_i64();
3458 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3459 if (op & 2) {
3460 /* VFNMA, VFNMS */
3461 gen_helper_vfp_negd(frd, frd);
3462 }
3463 fpst = get_fpstatus_ptr(0);
3464 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3465 cpu_F1d, frd, fpst);
3466 tcg_temp_free_ptr(fpst);
3467 tcg_temp_free_i64(frd);
3468 } else {
3469 TCGv_ptr fpst;
3470 TCGv_i32 frd;
3471 if (op & 1) {
3472 /* VFNMS, VFMS */
3473 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3474 }
3475 frd = tcg_temp_new_i32();
3476 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3477 if (op & 2) {
3478 gen_helper_vfp_negs(frd, frd);
3479 }
3480 fpst = get_fpstatus_ptr(0);
3481 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3482 cpu_F1s, frd, fpst);
3483 tcg_temp_free_ptr(fpst);
3484 tcg_temp_free_i32(frd);
3485 }
3486 break;
9ee6e8bb
PB
3487 case 14: /* fconst */
3488 if (!arm_feature(env, ARM_FEATURE_VFP3))
3489 return 1;
3490
3491 n = (insn << 12) & 0x80000000;
3492 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3493 if (dp) {
3494 if (i & 0x40)
3495 i |= 0x3f80;
3496 else
3497 i |= 0x4000;
3498 n |= i << 16;
4373f3ce 3499 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3500 } else {
3501 if (i & 0x40)
3502 i |= 0x780;
3503 else
3504 i |= 0x800;
3505 n |= i << 19;
5b340b51 3506 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3507 }
9ee6e8bb 3508 break;
b7bcbe95
FB
3509 case 15: /* extension space */
3510 switch (rn) {
3511 case 0: /* cpy */
3512 /* no-op */
3513 break;
3514 case 1: /* abs */
3515 gen_vfp_abs(dp);
3516 break;
3517 case 2: /* neg */
3518 gen_vfp_neg(dp);
3519 break;
3520 case 3: /* sqrt */
3521 gen_vfp_sqrt(dp);
3522 break;
239c20c7 3523 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3524 tmp = gen_vfp_mrs();
3525 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3526 if (dp) {
3527 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3528 cpu_env);
3529 } else {
3530 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3531 cpu_env);
3532 }
7d1b0095 3533 tcg_temp_free_i32(tmp);
60011498 3534 break;
239c20c7 3535 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3536 tmp = gen_vfp_mrs();
3537 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3538 if (dp) {
3539 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3540 cpu_env);
3541 } else {
3542 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3543 cpu_env);
3544 }
7d1b0095 3545 tcg_temp_free_i32(tmp);
60011498 3546 break;
239c20c7 3547 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3548 tmp = tcg_temp_new_i32();
239c20c7
WN
3549 if (dp) {
3550 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3551 cpu_env);
3552 } else {
3553 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3554 cpu_env);
3555 }
60011498
PB
3556 gen_mov_F0_vreg(0, rd);
3557 tmp2 = gen_vfp_mrs();
3558 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3559 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3560 tcg_temp_free_i32(tmp2);
60011498
PB
3561 gen_vfp_msr(tmp);
3562 break;
239c20c7 3563 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3564 tmp = tcg_temp_new_i32();
239c20c7
WN
3565 if (dp) {
3566 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3567 cpu_env);
3568 } else {
3569 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3570 cpu_env);
3571 }
60011498
PB
3572 tcg_gen_shli_i32(tmp, tmp, 16);
3573 gen_mov_F0_vreg(0, rd);
3574 tmp2 = gen_vfp_mrs();
3575 tcg_gen_ext16u_i32(tmp2, tmp2);
3576 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3577 tcg_temp_free_i32(tmp2);
60011498
PB
3578 gen_vfp_msr(tmp);
3579 break;
b7bcbe95
FB
3580 case 8: /* cmp */
3581 gen_vfp_cmp(dp);
3582 break;
3583 case 9: /* cmpe */
3584 gen_vfp_cmpe(dp);
3585 break;
3586 case 10: /* cmpz */
3587 gen_vfp_cmp(dp);
3588 break;
3589 case 11: /* cmpez */
3590 gen_vfp_F1_ld0(dp);
3591 gen_vfp_cmpe(dp);
3592 break;
664c6733
WN
3593 case 12: /* vrintr */
3594 {
3595 TCGv_ptr fpst = get_fpstatus_ptr(0);
3596 if (dp) {
3597 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3598 } else {
3599 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3600 }
3601 tcg_temp_free_ptr(fpst);
3602 break;
3603 }
a290c62a
WN
3604 case 13: /* vrintz */
3605 {
3606 TCGv_ptr fpst = get_fpstatus_ptr(0);
3607 TCGv_i32 tcg_rmode;
3608 tcg_rmode = tcg_const_i32(float_round_to_zero);
3609 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3610 if (dp) {
3611 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3612 } else {
3613 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3614 }
3615 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3616 tcg_temp_free_i32(tcg_rmode);
3617 tcg_temp_free_ptr(fpst);
3618 break;
3619 }
4e82bc01
WN
3620 case 14: /* vrintx */
3621 {
3622 TCGv_ptr fpst = get_fpstatus_ptr(0);
3623 if (dp) {
3624 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3625 } else {
3626 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3627 }
3628 tcg_temp_free_ptr(fpst);
3629 break;
3630 }
b7bcbe95
FB
3631 case 15: /* single<->double conversion */
3632 if (dp)
4373f3ce 3633 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3634 else
4373f3ce 3635 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3636 break;
3637 case 16: /* fuito */
5500b06c 3638 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3639 break;
3640 case 17: /* fsito */
5500b06c 3641 gen_vfp_sito(dp, 0);
b7bcbe95 3642 break;
9ee6e8bb
PB
3643 case 20: /* fshto */
3644 if (!arm_feature(env, ARM_FEATURE_VFP3))
3645 return 1;
5500b06c 3646 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3647 break;
3648 case 21: /* fslto */
3649 if (!arm_feature(env, ARM_FEATURE_VFP3))
3650 return 1;
5500b06c 3651 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3652 break;
3653 case 22: /* fuhto */
3654 if (!arm_feature(env, ARM_FEATURE_VFP3))
3655 return 1;
5500b06c 3656 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3657 break;
3658 case 23: /* fulto */
3659 if (!arm_feature(env, ARM_FEATURE_VFP3))
3660 return 1;
5500b06c 3661 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3662 break;
b7bcbe95 3663 case 24: /* ftoui */
5500b06c 3664 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3665 break;
3666 case 25: /* ftouiz */
5500b06c 3667 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3668 break;
3669 case 26: /* ftosi */
5500b06c 3670 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3671 break;
3672 case 27: /* ftosiz */
5500b06c 3673 gen_vfp_tosiz(dp, 0);
b7bcbe95 3674 break;
9ee6e8bb
PB
3675 case 28: /* ftosh */
3676 if (!arm_feature(env, ARM_FEATURE_VFP3))
3677 return 1;
5500b06c 3678 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3679 break;
3680 case 29: /* ftosl */
3681 if (!arm_feature(env, ARM_FEATURE_VFP3))
3682 return 1;
5500b06c 3683 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3684 break;
3685 case 30: /* ftouh */
3686 if (!arm_feature(env, ARM_FEATURE_VFP3))
3687 return 1;
5500b06c 3688 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3689 break;
3690 case 31: /* ftoul */
3691 if (!arm_feature(env, ARM_FEATURE_VFP3))
3692 return 1;
5500b06c 3693 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3694 break;
b7bcbe95 3695 default: /* undefined */
b7bcbe95
FB
3696 return 1;
3697 }
3698 break;
3699 default: /* undefined */
b7bcbe95
FB
3700 return 1;
3701 }
3702
3703 /* Write back the result. */
239c20c7
WN
3704 if (op == 15 && (rn >= 8 && rn <= 11)) {
3705 /* Comparison, do nothing. */
3706 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3707 (rn & 0x1e) == 0x6)) {
3708 /* VCVT double to int: always integer result.
3709 * VCVT double to half precision is always a single
3710 * precision result.
3711 */
b7bcbe95 3712 gen_mov_vreg_F0(0, rd);
239c20c7 3713 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3714 /* conversion */
3715 gen_mov_vreg_F0(!dp, rd);
239c20c7 3716 } else {
b7bcbe95 3717 gen_mov_vreg_F0(dp, rd);
239c20c7 3718 }
b7bcbe95
FB
3719
3720 /* break out of the loop if we have finished */
3721 if (veclen == 0)
3722 break;
3723
3724 if (op == 15 && delta_m == 0) {
3725 /* single source one-many */
3726 while (veclen--) {
3727 rd = ((rd + delta_d) & (bank_mask - 1))
3728 | (rd & bank_mask);
3729 gen_mov_vreg_F0(dp, rd);
3730 }
3731 break;
3732 }
3733 /* Setup the next operands. */
3734 veclen--;
3735 rd = ((rd + delta_d) & (bank_mask - 1))
3736 | (rd & bank_mask);
3737
3738 if (op == 15) {
3739 /* One source operand. */
3740 rm = ((rm + delta_m) & (bank_mask - 1))
3741 | (rm & bank_mask);
3742 gen_mov_F0_vreg(dp, rm);
3743 } else {
3744 /* Two source operands. */
3745 rn = ((rn + delta_d) & (bank_mask - 1))
3746 | (rn & bank_mask);
3747 gen_mov_F0_vreg(dp, rn);
3748 if (delta_m) {
3749 rm = ((rm + delta_m) & (bank_mask - 1))
3750 | (rm & bank_mask);
3751 gen_mov_F1_vreg(dp, rm);
3752 }
3753 }
3754 }
3755 }
3756 break;
3757 case 0xc:
3758 case 0xd:
8387da81 3759 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3760 /* two-register transfer */
3761 rn = (insn >> 16) & 0xf;
3762 rd = (insn >> 12) & 0xf;
3763 if (dp) {
9ee6e8bb
PB
3764 VFP_DREG_M(rm, insn);
3765 } else {
3766 rm = VFP_SREG_M(insn);
3767 }
b7bcbe95 3768
18c9b560 3769 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3770 /* vfp->arm */
3771 if (dp) {
4373f3ce
PB
3772 gen_mov_F0_vreg(0, rm * 2);
3773 tmp = gen_vfp_mrs();
3774 store_reg(s, rd, tmp);
3775 gen_mov_F0_vreg(0, rm * 2 + 1);
3776 tmp = gen_vfp_mrs();
3777 store_reg(s, rn, tmp);
b7bcbe95
FB
3778 } else {
3779 gen_mov_F0_vreg(0, rm);
4373f3ce 3780 tmp = gen_vfp_mrs();
8387da81 3781 store_reg(s, rd, tmp);
b7bcbe95 3782 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3783 tmp = gen_vfp_mrs();
8387da81 3784 store_reg(s, rn, tmp);
b7bcbe95
FB
3785 }
3786 } else {
3787 /* arm->vfp */
3788 if (dp) {
4373f3ce
PB
3789 tmp = load_reg(s, rd);
3790 gen_vfp_msr(tmp);
3791 gen_mov_vreg_F0(0, rm * 2);
3792 tmp = load_reg(s, rn);
3793 gen_vfp_msr(tmp);
3794 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3795 } else {
8387da81 3796 tmp = load_reg(s, rd);
4373f3ce 3797 gen_vfp_msr(tmp);
b7bcbe95 3798 gen_mov_vreg_F0(0, rm);
8387da81 3799 tmp = load_reg(s, rn);
4373f3ce 3800 gen_vfp_msr(tmp);
b7bcbe95
FB
3801 gen_mov_vreg_F0(0, rm + 1);
3802 }
3803 }
3804 } else {
3805 /* Load/store */
3806 rn = (insn >> 16) & 0xf;
3807 if (dp)
9ee6e8bb 3808 VFP_DREG_D(rd, insn);
b7bcbe95 3809 else
9ee6e8bb 3810 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3811 if ((insn & 0x01200000) == 0x01000000) {
3812 /* Single load/store */
3813 offset = (insn & 0xff) << 2;
3814 if ((insn & (1 << 23)) == 0)
3815 offset = -offset;
934814f1
PM
3816 if (s->thumb && rn == 15) {
3817 /* This is actually UNPREDICTABLE */
3818 addr = tcg_temp_new_i32();
3819 tcg_gen_movi_i32(addr, s->pc & ~2);
3820 } else {
3821 addr = load_reg(s, rn);
3822 }
312eea9f 3823 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3824 if (insn & (1 << 20)) {
312eea9f 3825 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3826 gen_mov_vreg_F0(dp, rd);
3827 } else {
3828 gen_mov_F0_vreg(dp, rd);
312eea9f 3829 gen_vfp_st(s, dp, addr);
b7bcbe95 3830 }
7d1b0095 3831 tcg_temp_free_i32(addr);
b7bcbe95
FB
3832 } else {
3833 /* load/store multiple */
934814f1 3834 int w = insn & (1 << 21);
b7bcbe95
FB
3835 if (dp)
3836 n = (insn >> 1) & 0x7f;
3837 else
3838 n = insn & 0xff;
3839
934814f1
PM
3840 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3841 /* P == U , W == 1 => UNDEF */
3842 return 1;
3843 }
3844 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3845 /* UNPREDICTABLE cases for bad immediates: we choose to
3846 * UNDEF to avoid generating huge numbers of TCG ops
3847 */
3848 return 1;
3849 }
3850 if (rn == 15 && w) {
3851 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3852 return 1;
3853 }
3854
3855 if (s->thumb && rn == 15) {
3856 /* This is actually UNPREDICTABLE */
3857 addr = tcg_temp_new_i32();
3858 tcg_gen_movi_i32(addr, s->pc & ~2);
3859 } else {
3860 addr = load_reg(s, rn);
3861 }
b7bcbe95 3862 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3863 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3864
3865 if (dp)
3866 offset = 8;
3867 else
3868 offset = 4;
3869 for (i = 0; i < n; i++) {
18c9b560 3870 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3871 /* load */
312eea9f 3872 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3873 gen_mov_vreg_F0(dp, rd + i);
3874 } else {
3875 /* store */
3876 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3877 gen_vfp_st(s, dp, addr);
b7bcbe95 3878 }
312eea9f 3879 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3880 }
934814f1 3881 if (w) {
b7bcbe95
FB
3882 /* writeback */
3883 if (insn & (1 << 24))
3884 offset = -offset * n;
3885 else if (dp && (insn & 1))
3886 offset = 4;
3887 else
3888 offset = 0;
3889
3890 if (offset != 0)
312eea9f
FN
3891 tcg_gen_addi_i32(addr, addr, offset);
3892 store_reg(s, rn, addr);
3893 } else {
7d1b0095 3894 tcg_temp_free_i32(addr);
b7bcbe95
FB
3895 }
3896 }
3897 }
3898 break;
3899 default:
3900 /* Should never happen. */
3901 return 1;
3902 }
3903 return 0;
3904}
3905
0a2461fa 3906static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3907{
6e256c93
FB
3908 TranslationBlock *tb;
3909
3910 tb = s->tb;
3911 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3912 tcg_gen_goto_tb(n);
eaed129d 3913 gen_set_pc_im(s, dest);
8cfd0495 3914 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3915 } else {
eaed129d 3916 gen_set_pc_im(s, dest);
57fec1fe 3917 tcg_gen_exit_tb(0);
6e256c93 3918 }
c53be334
FB
3919}
3920
8aaca4c0
FB
3921static inline void gen_jmp (DisasContext *s, uint32_t dest)
3922{
50225ad0 3923 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 3924 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3925 if (s->thumb)
d9ba4830
PB
3926 dest |= 1;
3927 gen_bx_im(s, dest);
8aaca4c0 3928 } else {
6e256c93 3929 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3930 s->is_jmp = DISAS_TB_JUMP;
3931 }
3932}
3933
39d5492a 3934static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3935{
ee097184 3936 if (x)
d9ba4830 3937 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3938 else
d9ba4830 3939 gen_sxth(t0);
ee097184 3940 if (y)
d9ba4830 3941 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3942 else
d9ba4830
PB
3943 gen_sxth(t1);
3944 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3945}
3946
3947/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3948static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3949 uint32_t mask;
3950
3951 mask = 0;
3952 if (flags & (1 << 0))
3953 mask |= 0xff;
3954 if (flags & (1 << 1))
3955 mask |= 0xff00;
3956 if (flags & (1 << 2))
3957 mask |= 0xff0000;
3958 if (flags & (1 << 3))
3959 mask |= 0xff000000;
9ee6e8bb 3960
2ae23e75 3961 /* Mask out undefined bits. */
9ee6e8bb 3962 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3963 if (!arm_feature(env, ARM_FEATURE_V4T))
3964 mask &= ~CPSR_T;
3965 if (!arm_feature(env, ARM_FEATURE_V5))
3966 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3967 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3968 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3969 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3970 mask &= ~CPSR_IT;
4051e12c
PM
3971 /* Mask out execution state and reserved bits. */
3972 if (!spsr) {
3973 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
3974 }
b5ff1b31
FB
3975 /* Mask out privileged bits. */
3976 if (IS_USER(s))
9ee6e8bb 3977 mask &= CPSR_USER;
b5ff1b31
FB
3978 return mask;
3979}
3980
2fbac54b 3981/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3982static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3983{
39d5492a 3984 TCGv_i32 tmp;
b5ff1b31
FB
3985 if (spsr) {
3986 /* ??? This is also undefined in system mode. */
3987 if (IS_USER(s))
3988 return 1;
d9ba4830
PB
3989
3990 tmp = load_cpu_field(spsr);
3991 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3992 tcg_gen_andi_i32(t0, t0, mask);
3993 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3994 store_cpu_field(tmp, spsr);
b5ff1b31 3995 } else {
2fbac54b 3996 gen_set_cpsr(t0, mask);
b5ff1b31 3997 }
7d1b0095 3998 tcg_temp_free_i32(t0);
b5ff1b31
FB
3999 gen_lookup_tb(s);
4000 return 0;
4001}
4002
2fbac54b
FN
4003/* Returns nonzero if access to the PSR is not permitted. */
4004static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4005{
39d5492a 4006 TCGv_i32 tmp;
7d1b0095 4007 tmp = tcg_temp_new_i32();
2fbac54b
FN
4008 tcg_gen_movi_i32(tmp, val);
4009 return gen_set_psr(s, mask, spsr, tmp);
4010}
4011
e9bb4aa9 4012/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 4013static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4014{
39d5492a 4015 TCGv_i32 tmp;
e9bb4aa9 4016 store_reg(s, 15, pc);
d9ba4830 4017 tmp = load_cpu_field(spsr);
4051e12c 4018 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 4019 tcg_temp_free_i32(tmp);
b5ff1b31
FB
4020 s->is_jmp = DISAS_UPDATE;
4021}
4022
b0109805 4023/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4024static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4025{
4051e12c 4026 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
7d1b0095 4027 tcg_temp_free_i32(cpsr);
b0109805 4028 store_reg(s, 15, pc);
9ee6e8bb
PB
4029 s->is_jmp = DISAS_UPDATE;
4030}
3b46e624 4031
9ee6e8bb
PB
4032static void gen_nop_hint(DisasContext *s, int val)
4033{
4034 switch (val) {
4035 case 3: /* wfi */
eaed129d 4036 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4037 s->is_jmp = DISAS_WFI;
4038 break;
4039 case 2: /* wfe */
72c1d3af
PM
4040 gen_set_pc_im(s, s->pc);
4041 s->is_jmp = DISAS_WFE;
4042 break;
9ee6e8bb 4043 case 4: /* sev */
12b10571
MR
4044 case 5: /* sevl */
4045 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4046 default: /* nop */
4047 break;
4048 }
4049}
99c475ab 4050
ad69471c 4051#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4052
39d5492a 4053static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4054{
4055 switch (size) {
dd8fbd78
FN
4056 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4057 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4058 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4059 default: abort();
9ee6e8bb 4060 }
9ee6e8bb
PB
4061}
4062
39d5492a 4063static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4064{
4065 switch (size) {
dd8fbd78
FN
4066 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4067 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4068 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4069 default: return;
4070 }
4071}
4072
4073/* 32-bit pairwise ops end up the same as the elementwise versions. */
4074#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4075#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4076#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4077#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4078
ad69471c
PB
/* Call the signed/unsigned Neon helper "name" variant for helpers that
 * also take cpu_env.  The variant is picked by the (size, u) pair in
 * scope at the expansion site: selector (size << 1) | u, where size
 * 0/1/2 means 8/16/32-bit elements and u selects the unsigned form.
 * Operands are tmp/tmp2; the result is left in tmp.  On an unsupported
 * combination the enclosing function returns 1 (i.e. UNDEF), so this
 * must only be expanded inside a translate function returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4101
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.
 * Selector is (size << 1) | u from the expansion site; operands tmp/tmp2,
 * result in tmp; unsupported combinations make the caller return 1.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4124
39d5492a 4125static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4126{
39d5492a 4127 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4128 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4129 return tmp;
9ee6e8bb
PB
4130}
4131
39d5492a 4132static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4133{
dd8fbd78 4134 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4135 tcg_temp_free_i32(var);
9ee6e8bb
PB
4136}
4137
39d5492a 4138static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4139{
39d5492a 4140 TCGv_i32 tmp;
9ee6e8bb 4141 if (size == 1) {
0fad6efc
PM
4142 tmp = neon_load_reg(reg & 7, reg >> 4);
4143 if (reg & 8) {
dd8fbd78 4144 gen_neon_dup_high16(tmp);
0fad6efc
PM
4145 } else {
4146 gen_neon_dup_low16(tmp);
dd8fbd78 4147 }
0fad6efc
PM
4148 } else {
4149 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4150 }
dd8fbd78 4151 return tmp;
9ee6e8bb
PB
4152}
4153
02acedf9 4154static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4155{
39d5492a 4156 TCGv_i32 tmp, tmp2;
600b828c 4157 if (!q && size == 2) {
02acedf9
PM
4158 return 1;
4159 }
4160 tmp = tcg_const_i32(rd);
4161 tmp2 = tcg_const_i32(rm);
4162 if (q) {
4163 switch (size) {
4164 case 0:
02da0b2d 4165 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4166 break;
4167 case 1:
02da0b2d 4168 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4169 break;
4170 case 2:
02da0b2d 4171 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4172 break;
4173 default:
4174 abort();
4175 }
4176 } else {
4177 switch (size) {
4178 case 0:
02da0b2d 4179 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4180 break;
4181 case 1:
02da0b2d 4182 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4183 break;
4184 default:
4185 abort();
4186 }
4187 }
4188 tcg_temp_free_i32(tmp);
4189 tcg_temp_free_i32(tmp2);
4190 return 0;
19457615
FN
4191}
4192
d68a6f3a 4193static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4194{
39d5492a 4195 TCGv_i32 tmp, tmp2;
600b828c 4196 if (!q && size == 2) {
d68a6f3a
PM
4197 return 1;
4198 }
4199 tmp = tcg_const_i32(rd);
4200 tmp2 = tcg_const_i32(rm);
4201 if (q) {
4202 switch (size) {
4203 case 0:
02da0b2d 4204 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4205 break;
4206 case 1:
02da0b2d 4207 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4208 break;
4209 case 2:
02da0b2d 4210 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4211 break;
4212 default:
4213 abort();
4214 }
4215 } else {
4216 switch (size) {
4217 case 0:
02da0b2d 4218 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4219 break;
4220 case 1:
02da0b2d 4221 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4222 break;
4223 default:
4224 abort();
4225 }
4226 }
4227 tcg_temp_free_i32(tmp);
4228 tcg_temp_free_i32(tmp2);
4229 return 0;
19457615
FN
4230}
4231
39d5492a 4232static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4233{
39d5492a 4234 TCGv_i32 rd, tmp;
19457615 4235
7d1b0095
PM
4236 rd = tcg_temp_new_i32();
4237 tmp = tcg_temp_new_i32();
19457615
FN
4238
4239 tcg_gen_shli_i32(rd, t0, 8);
4240 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4241 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4242 tcg_gen_or_i32(rd, rd, tmp);
4243
4244 tcg_gen_shri_i32(t1, t1, 8);
4245 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4246 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4247 tcg_gen_or_i32(t1, t1, tmp);
4248 tcg_gen_mov_i32(t0, rd);
4249
7d1b0095
PM
4250 tcg_temp_free_i32(tmp);
4251 tcg_temp_free_i32(rd);
19457615
FN
4252}
4253
39d5492a 4254static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4255{
39d5492a 4256 TCGv_i32 rd, tmp;
19457615 4257
7d1b0095
PM
4258 rd = tcg_temp_new_i32();
4259 tmp = tcg_temp_new_i32();
19457615
FN
4260
4261 tcg_gen_shli_i32(rd, t0, 16);
4262 tcg_gen_andi_i32(tmp, t1, 0xffff);
4263 tcg_gen_or_i32(rd, rd, tmp);
4264 tcg_gen_shri_i32(t1, t1, 16);
4265 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4266 tcg_gen_or_i32(t1, t1, tmp);
4267 tcg_gen_mov_i32(t0, rd);
4268
7d1b0095
PM
4269 tcg_temp_free_i32(tmp);
4270 tcg_temp_free_i32(rd);
19457615
FN
4271}
4272
4273
9ee6e8bb
PB
/* Descriptors for the Neon "load/store multiple structures" encodings,
 * indexed by the op field of the instruction.  Fields are consumed by
 * disas_neon_ls_insn: interleave scales the per-element address stride,
 * spacing is added to rd after each register is transferred.
 * The table is never written, so declare it const.
 */
static const struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* D-register index step between transfers */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4291
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;      /* base address register */
    rm = insn & 0xf;              /* writeback mode / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base with a
             * per-register offset rather than running linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one 32-bit word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit accesses packed into one 32-bit word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4620
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
/* NOTE: the order matters — t is masked before f so the computation is
 * still correct if dest aliases one of the inputs.
 */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);    /* keep the "true" bits selected by c */
    tcg_gen_andc_i32(f, f, c);   /* keep the "false" bits where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
4628
39d5492a 4629static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4630{
4631 switch (size) {
4632 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4633 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4634 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4635 default: abort();
4636 }
4637}
4638
39d5492a 4639static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4640{
4641 switch (size) {
02da0b2d
PM
4642 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4643 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4644 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4645 default: abort();
4646 }
4647}
4648
39d5492a 4649static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4650{
4651 switch (size) {
02da0b2d
PM
4652 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4653 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4654 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4655 default: abort();
4656 }
4657}
4658
39d5492a 4659static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4660{
4661 switch (size) {
02da0b2d
PM
4662 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4663 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4664 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4665 default: abort();
4666 }
4667}
4668
39d5492a 4669static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4670 int q, int u)
4671{
4672 if (q) {
4673 if (u) {
4674 switch (size) {
4675 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4676 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4677 default: abort();
4678 }
4679 } else {
4680 switch (size) {
4681 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4682 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4683 default: abort();
4684 }
4685 }
4686 } else {
4687 if (u) {
4688 switch (size) {
b408a9b0
CL
4689 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4690 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4691 default: abort();
4692 }
4693 } else {
4694 switch (size) {
4695 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4696 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4697 default: abort();
4698 }
4699 }
4700 }
4701}
4702
39d5492a 4703static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4704{
4705 if (u) {
4706 switch (size) {
4707 case 0: gen_helper_neon_widen_u8(dest, src); break;
4708 case 1: gen_helper_neon_widen_u16(dest, src); break;
4709 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4710 default: abort();
4711 }
4712 } else {
4713 switch (size) {
4714 case 0: gen_helper_neon_widen_s8(dest, src); break;
4715 case 1: gen_helper_neon_widen_s16(dest, src); break;
4716 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4717 default: abort();
4718 }
4719 }
7d1b0095 4720 tcg_temp_free_i32(src);
ad69471c
PB
4721}
4722
4723static inline void gen_neon_addl(int size)
4724{
4725 switch (size) {
4726 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4727 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4728 case 2: tcg_gen_add_i64(CPU_V001); break;
4729 default: abort();
4730 }
4731}
4732
4733static inline void gen_neon_subl(int size)
4734{
4735 switch (size) {
4736 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4737 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4738 case 2: tcg_gen_sub_i64(CPU_V001); break;
4739 default: abort();
4740 }
4741}
4742
a7812ae4 4743static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4744{
4745 switch (size) {
4746 case 0: gen_helper_neon_negl_u16(var, var); break;
4747 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4748 case 2:
4749 tcg_gen_neg_i64(var, var);
4750 break;
ad69471c
PB
4751 default: abort();
4752 }
4753}
4754
a7812ae4 4755static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4756{
4757 switch (size) {
02da0b2d
PM
4758 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4759 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4760 default: abort();
4761 }
4762}
4763
/* Widening multiply: dest (64-bit) = a * b, with element width given by
 * size (0: 8-bit, 1: 16-bit, 2: 32-bit) and signedness by u (0: signed).
 * The selector (size << 1) | u matches GEN_NEON_INTEGER_OP.
 * Consumes (frees) a and b in all cases.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64: use the generic scalar widening multiply. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4794
39d5492a
PM
4795static void gen_neon_narrow_op(int op, int u, int size,
4796 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4797{
4798 if (op) {
4799 if (u) {
4800 gen_neon_unarrow_sats(size, dest, src);
4801 } else {
4802 gen_neon_narrow(size, dest, src);
4803 }
4804 } else {
4805 if (u) {
4806 gen_neon_narrow_satu(size, dest, src);
4807 } else {
4808 gen_neon_narrow_sats(size, dest, src);
4809 }
4810 }
4811}
4812
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the op above (its index) allows size
 * value n; unallocated ops have no bits set and so always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4884
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4951
4952static int neon_2rm_is_float_op(int op)
4953{
4954 /* Return true if this neon 2reg-misc op is float-to-float */
4955 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 4956 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
4957 op == NEON_2RM_VRINTM ||
4958 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 4959 op >= NEON_2RM_VRECPE_F);
600b828c
PM
4960}
4961
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5030
9ee6e8bb
PB
5031/* Translate a NEON data processing instruction. Return nonzero if the
5032 instruction is invalid.
ad69471c
PB
5033 We process data in a mixture of 32-bit and 64-bit chunks.
5034 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5035
0ecb72a5 5036static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5037{
5038 int op;
5039 int q;
5040 int rd, rn, rm;
5041 int size;
5042 int shift;
5043 int pass;
5044 int count;
5045 int pairwise;
5046 int u;
ca9a32e4 5047 uint32_t imm, mask;
39d5492a 5048 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5049 TCGv_i64 tmp64;
9ee6e8bb 5050
2c7ffc41
PM
5051 /* FIXME: this access check should not take precedence over UNDEF
5052 * for invalid encodings; we will generate incorrect syndrome information
5053 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5054 */
5055 if (!s->cpacr_fpen) {
5056 gen_exception_insn(s, 4, EXCP_UDEF,
5057 syn_fp_access_trap(1, 0xe, s->thumb));
5058 return 0;
5059 }
5060
5df8bac1 5061 if (!s->vfp_enabled)
9ee6e8bb
PB
5062 return 1;
5063 q = (insn & (1 << 6)) != 0;
5064 u = (insn >> 24) & 1;
5065 VFP_DREG_D(rd, insn);
5066 VFP_DREG_N(rn, insn);
5067 VFP_DREG_M(rm, insn);
5068 size = (insn >> 20) & 3;
5069 if ((insn & (1 << 23)) == 0) {
5070 /* Three register same length. */
5071 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5072 /* Catch invalid op and bad size combinations: UNDEF */
5073 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5074 return 1;
5075 }
25f84f79
PM
5076 /* All insns of this form UNDEF for either this condition or the
5077 * superset of cases "Q==1"; we catch the latter later.
5078 */
5079 if (q && ((rd | rn | rm) & 1)) {
5080 return 1;
5081 }
f1ecb913
AB
5082 /*
5083 * The SHA-1/SHA-256 3-register instructions require special treatment
5084 * here, as their size field is overloaded as an op type selector, and
5085 * they all consume their input in a single pass.
5086 */
5087 if (op == NEON_3R_SHA) {
5088 if (!q) {
5089 return 1;
5090 }
5091 if (!u) { /* SHA-1 */
5092 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
5093 return 1;
5094 }
5095 tmp = tcg_const_i32(rd);
5096 tmp2 = tcg_const_i32(rn);
5097 tmp3 = tcg_const_i32(rm);
5098 tmp4 = tcg_const_i32(size);
5099 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5100 tcg_temp_free_i32(tmp4);
5101 } else { /* SHA-256 */
5102 if (!arm_feature(env, ARM_FEATURE_V8_SHA256) || size == 3) {
5103 return 1;
5104 }
5105 tmp = tcg_const_i32(rd);
5106 tmp2 = tcg_const_i32(rn);
5107 tmp3 = tcg_const_i32(rm);
5108 switch (size) {
5109 case 0:
5110 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5111 break;
5112 case 1:
5113 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5114 break;
5115 case 2:
5116 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5117 break;
5118 }
5119 }
5120 tcg_temp_free_i32(tmp);
5121 tcg_temp_free_i32(tmp2);
5122 tcg_temp_free_i32(tmp3);
5123 return 0;
5124 }
62698be3
PM
5125 if (size == 3 && op != NEON_3R_LOGIC) {
5126 /* 64-bit element instructions. */
9ee6e8bb 5127 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5128 neon_load_reg64(cpu_V0, rn + pass);
5129 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5130 switch (op) {
62698be3 5131 case NEON_3R_VQADD:
9ee6e8bb 5132 if (u) {
02da0b2d
PM
5133 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5134 cpu_V0, cpu_V1);
2c0262af 5135 } else {
02da0b2d
PM
5136 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5137 cpu_V0, cpu_V1);
2c0262af 5138 }
9ee6e8bb 5139 break;
62698be3 5140 case NEON_3R_VQSUB:
9ee6e8bb 5141 if (u) {
02da0b2d
PM
5142 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5143 cpu_V0, cpu_V1);
ad69471c 5144 } else {
02da0b2d
PM
5145 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5146 cpu_V0, cpu_V1);
ad69471c
PB
5147 }
5148 break;
62698be3 5149 case NEON_3R_VSHL:
ad69471c
PB
5150 if (u) {
5151 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5152 } else {
5153 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5154 }
5155 break;
62698be3 5156 case NEON_3R_VQSHL:
ad69471c 5157 if (u) {
02da0b2d
PM
5158 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5159 cpu_V1, cpu_V0);
ad69471c 5160 } else {
02da0b2d
PM
5161 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5162 cpu_V1, cpu_V0);
ad69471c
PB
5163 }
5164 break;
62698be3 5165 case NEON_3R_VRSHL:
ad69471c
PB
5166 if (u) {
5167 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5168 } else {
ad69471c
PB
5169 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5170 }
5171 break;
62698be3 5172 case NEON_3R_VQRSHL:
ad69471c 5173 if (u) {
02da0b2d
PM
5174 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5175 cpu_V1, cpu_V0);
ad69471c 5176 } else {
02da0b2d
PM
5177 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5178 cpu_V1, cpu_V0);
1e8d4eec 5179 }
9ee6e8bb 5180 break;
62698be3 5181 case NEON_3R_VADD_VSUB:
9ee6e8bb 5182 if (u) {
ad69471c 5183 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5184 } else {
ad69471c 5185 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5186 }
5187 break;
5188 default:
5189 abort();
2c0262af 5190 }
ad69471c 5191 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5192 }
9ee6e8bb 5193 return 0;
2c0262af 5194 }
25f84f79 5195 pairwise = 0;
9ee6e8bb 5196 switch (op) {
62698be3
PM
5197 case NEON_3R_VSHL:
5198 case NEON_3R_VQSHL:
5199 case NEON_3R_VRSHL:
5200 case NEON_3R_VQRSHL:
9ee6e8bb 5201 {
ad69471c
PB
5202 int rtmp;
5203 /* Shift instruction operands are reversed. */
5204 rtmp = rn;
9ee6e8bb 5205 rn = rm;
ad69471c 5206 rm = rtmp;
9ee6e8bb 5207 }
2c0262af 5208 break;
25f84f79
PM
5209 case NEON_3R_VPADD:
5210 if (u) {
5211 return 1;
5212 }
5213 /* Fall through */
62698be3
PM
5214 case NEON_3R_VPMAX:
5215 case NEON_3R_VPMIN:
9ee6e8bb 5216 pairwise = 1;
2c0262af 5217 break;
25f84f79
PM
5218 case NEON_3R_FLOAT_ARITH:
5219 pairwise = (u && size < 2); /* if VPADD (float) */
5220 break;
5221 case NEON_3R_FLOAT_MINMAX:
5222 pairwise = u; /* if VPMIN/VPMAX (float) */
5223 break;
5224 case NEON_3R_FLOAT_CMP:
5225 if (!u && size) {
5226 /* no encoding for U=0 C=1x */
5227 return 1;
5228 }
5229 break;
5230 case NEON_3R_FLOAT_ACMP:
5231 if (!u) {
5232 return 1;
5233 }
5234 break;
505935fc
WN
5235 case NEON_3R_FLOAT_MISC:
5236 /* VMAXNM/VMINNM in ARMv8 */
5237 if (u && !arm_feature(env, ARM_FEATURE_V8)) {
25f84f79
PM
5238 return 1;
5239 }
2c0262af 5240 break;
25f84f79
PM
5241 case NEON_3R_VMUL:
5242 if (u && (size != 0)) {
5243 /* UNDEF on invalid size for polynomial subcase */
5244 return 1;
5245 }
2c0262af 5246 break;
da97f52c
PM
5247 case NEON_3R_VFM:
5248 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
5249 return 1;
5250 }
5251 break;
9ee6e8bb 5252 default:
2c0262af 5253 break;
9ee6e8bb 5254 }
dd8fbd78 5255
25f84f79
PM
5256 if (pairwise && q) {
5257 /* All the pairwise insns UNDEF if Q is set */
5258 return 1;
5259 }
5260
9ee6e8bb
PB
5261 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5262
5263 if (pairwise) {
5264 /* Pairwise. */
a5a14945
JR
5265 if (pass < 1) {
5266 tmp = neon_load_reg(rn, 0);
5267 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5268 } else {
a5a14945
JR
5269 tmp = neon_load_reg(rm, 0);
5270 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5271 }
5272 } else {
5273 /* Elementwise. */
dd8fbd78
FN
5274 tmp = neon_load_reg(rn, pass);
5275 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5276 }
5277 switch (op) {
62698be3 5278 case NEON_3R_VHADD:
9ee6e8bb
PB
5279 GEN_NEON_INTEGER_OP(hadd);
5280 break;
62698be3 5281 case NEON_3R_VQADD:
02da0b2d 5282 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5283 break;
62698be3 5284 case NEON_3R_VRHADD:
9ee6e8bb 5285 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5286 break;
62698be3 5287 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5288 switch ((u << 2) | size) {
5289 case 0: /* VAND */
dd8fbd78 5290 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5291 break;
5292 case 1: /* BIC */
f669df27 5293 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5294 break;
5295 case 2: /* VORR */
dd8fbd78 5296 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5297 break;
5298 case 3: /* VORN */
f669df27 5299 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5300 break;
5301 case 4: /* VEOR */
dd8fbd78 5302 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5303 break;
5304 case 5: /* VBSL */
dd8fbd78
FN
5305 tmp3 = neon_load_reg(rd, pass);
5306 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5307 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5308 break;
5309 case 6: /* VBIT */
dd8fbd78
FN
5310 tmp3 = neon_load_reg(rd, pass);
5311 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5312 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5313 break;
5314 case 7: /* VBIF */
dd8fbd78
FN
5315 tmp3 = neon_load_reg(rd, pass);
5316 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5317 tcg_temp_free_i32(tmp3);
9ee6e8bb 5318 break;
2c0262af
FB
5319 }
5320 break;
62698be3 5321 case NEON_3R_VHSUB:
9ee6e8bb
PB
5322 GEN_NEON_INTEGER_OP(hsub);
5323 break;
62698be3 5324 case NEON_3R_VQSUB:
02da0b2d 5325 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5326 break;
62698be3 5327 case NEON_3R_VCGT:
9ee6e8bb
PB
5328 GEN_NEON_INTEGER_OP(cgt);
5329 break;
62698be3 5330 case NEON_3R_VCGE:
9ee6e8bb
PB
5331 GEN_NEON_INTEGER_OP(cge);
5332 break;
62698be3 5333 case NEON_3R_VSHL:
ad69471c 5334 GEN_NEON_INTEGER_OP(shl);
2c0262af 5335 break;
62698be3 5336 case NEON_3R_VQSHL:
02da0b2d 5337 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5338 break;
62698be3 5339 case NEON_3R_VRSHL:
ad69471c 5340 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5341 break;
62698be3 5342 case NEON_3R_VQRSHL:
02da0b2d 5343 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5344 break;
62698be3 5345 case NEON_3R_VMAX:
9ee6e8bb
PB
5346 GEN_NEON_INTEGER_OP(max);
5347 break;
62698be3 5348 case NEON_3R_VMIN:
9ee6e8bb
PB
5349 GEN_NEON_INTEGER_OP(min);
5350 break;
62698be3 5351 case NEON_3R_VABD:
9ee6e8bb
PB
5352 GEN_NEON_INTEGER_OP(abd);
5353 break;
62698be3 5354 case NEON_3R_VABA:
9ee6e8bb 5355 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5356 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5357 tmp2 = neon_load_reg(rd, pass);
5358 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5359 break;
62698be3 5360 case NEON_3R_VADD_VSUB:
9ee6e8bb 5361 if (!u) { /* VADD */
62698be3 5362 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5363 } else { /* VSUB */
5364 switch (size) {
dd8fbd78
FN
5365 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5366 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5367 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5368 default: abort();
9ee6e8bb
PB
5369 }
5370 }
5371 break;
62698be3 5372 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5373 if (!u) { /* VTST */
5374 switch (size) {
dd8fbd78
FN
5375 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5376 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5377 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5378 default: abort();
9ee6e8bb
PB
5379 }
5380 } else { /* VCEQ */
5381 switch (size) {
dd8fbd78
FN
5382 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5383 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5384 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5385 default: abort();
9ee6e8bb
PB
5386 }
5387 }
5388 break;
62698be3 5389 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5390 switch (size) {
dd8fbd78
FN
5391 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5392 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5393 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5394 default: abort();
9ee6e8bb 5395 }
7d1b0095 5396 tcg_temp_free_i32(tmp2);
dd8fbd78 5397 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5398 if (u) { /* VMLS */
dd8fbd78 5399 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5400 } else { /* VMLA */
dd8fbd78 5401 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5402 }
5403 break;
62698be3 5404 case NEON_3R_VMUL:
9ee6e8bb 5405 if (u) { /* polynomial */
dd8fbd78 5406 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5407 } else { /* Integer */
5408 switch (size) {
dd8fbd78
FN
5409 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5410 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5411 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5412 default: abort();
9ee6e8bb
PB
5413 }
5414 }
5415 break;
62698be3 5416 case NEON_3R_VPMAX:
9ee6e8bb
PB
5417 GEN_NEON_INTEGER_OP(pmax);
5418 break;
62698be3 5419 case NEON_3R_VPMIN:
9ee6e8bb
PB
5420 GEN_NEON_INTEGER_OP(pmin);
5421 break;
62698be3 5422 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5423 if (!u) { /* VQDMULH */
5424 switch (size) {
02da0b2d
PM
5425 case 1:
5426 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5427 break;
5428 case 2:
5429 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5430 break;
62698be3 5431 default: abort();
9ee6e8bb 5432 }
62698be3 5433 } else { /* VQRDMULH */
9ee6e8bb 5434 switch (size) {
02da0b2d
PM
5435 case 1:
5436 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5437 break;
5438 case 2:
5439 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5440 break;
62698be3 5441 default: abort();
9ee6e8bb
PB
5442 }
5443 }
5444 break;
62698be3 5445 case NEON_3R_VPADD:
9ee6e8bb 5446 switch (size) {
dd8fbd78
FN
5447 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5448 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5449 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5450 default: abort();
9ee6e8bb
PB
5451 }
5452 break;
62698be3 5453 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5454 {
5455 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5456 switch ((u << 2) | size) {
5457 case 0: /* VADD */
aa47cfdd
PM
5458 case 4: /* VPADD */
5459 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5460 break;
5461 case 2: /* VSUB */
aa47cfdd 5462 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5463 break;
5464 case 6: /* VABD */
aa47cfdd 5465 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5466 break;
5467 default:
62698be3 5468 abort();
9ee6e8bb 5469 }
aa47cfdd 5470 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5471 break;
aa47cfdd 5472 }
62698be3 5473 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5474 {
5475 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5476 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5477 if (!u) {
7d1b0095 5478 tcg_temp_free_i32(tmp2);
dd8fbd78 5479 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5480 if (size == 0) {
aa47cfdd 5481 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5482 } else {
aa47cfdd 5483 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5484 }
5485 }
aa47cfdd 5486 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5487 break;
aa47cfdd 5488 }
62698be3 5489 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5490 {
5491 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5492 if (!u) {
aa47cfdd 5493 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5494 } else {
aa47cfdd
PM
5495 if (size == 0) {
5496 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5497 } else {
5498 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5499 }
b5ff1b31 5500 }
aa47cfdd 5501 tcg_temp_free_ptr(fpstatus);
2c0262af 5502 break;
aa47cfdd 5503 }
62698be3 5504 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5505 {
5506 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5507 if (size == 0) {
5508 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5509 } else {
5510 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5511 }
5512 tcg_temp_free_ptr(fpstatus);
2c0262af 5513 break;
aa47cfdd 5514 }
62698be3 5515 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5516 {
5517 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5518 if (size == 0) {
f71a2ae5 5519 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5520 } else {
f71a2ae5 5521 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5522 }
5523 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5524 break;
aa47cfdd 5525 }
505935fc
WN
5526 case NEON_3R_FLOAT_MISC:
5527 if (u) {
5528 /* VMAXNM/VMINNM */
5529 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5530 if (size == 0) {
f71a2ae5 5531 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5532 } else {
f71a2ae5 5533 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5534 }
5535 tcg_temp_free_ptr(fpstatus);
5536 } else {
5537 if (size == 0) {
5538 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5539 } else {
5540 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5541 }
5542 }
2c0262af 5543 break;
da97f52c
PM
5544 case NEON_3R_VFM:
5545 {
5546 /* VFMA, VFMS: fused multiply-add */
5547 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5548 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5549 if (size) {
5550 /* VFMS */
5551 gen_helper_vfp_negs(tmp, tmp);
5552 }
5553 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5554 tcg_temp_free_i32(tmp3);
5555 tcg_temp_free_ptr(fpstatus);
5556 break;
5557 }
9ee6e8bb
PB
5558 default:
5559 abort();
2c0262af 5560 }
7d1b0095 5561 tcg_temp_free_i32(tmp2);
dd8fbd78 5562
9ee6e8bb
PB
5563 /* Save the result. For elementwise operations we can put it
5564 straight into the destination register. For pairwise operations
5565 we have to be careful to avoid clobbering the source operands. */
5566 if (pairwise && rd == rm) {
dd8fbd78 5567 neon_store_scratch(pass, tmp);
9ee6e8bb 5568 } else {
dd8fbd78 5569 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5570 }
5571
5572 } /* for pass */
5573 if (pairwise && rd == rm) {
5574 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5575 tmp = neon_load_scratch(pass);
5576 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5577 }
5578 }
ad69471c 5579 /* End of 3 register same size operations. */
9ee6e8bb
PB
5580 } else if (insn & (1 << 4)) {
5581 if ((insn & 0x00380080) != 0) {
5582 /* Two registers and shift. */
5583 op = (insn >> 8) & 0xf;
5584 if (insn & (1 << 7)) {
cc13115b
PM
5585 /* 64-bit shift. */
5586 if (op > 7) {
5587 return 1;
5588 }
9ee6e8bb
PB
5589 size = 3;
5590 } else {
5591 size = 2;
5592 while ((insn & (1 << (size + 19))) == 0)
5593 size--;
5594 }
5595 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5596 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5597 by immediate using the variable shift operations. */
5598 if (op < 8) {
5599 /* Shift by immediate:
5600 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5601 if (q && ((rd | rm) & 1)) {
5602 return 1;
5603 }
5604 if (!u && (op == 4 || op == 6)) {
5605 return 1;
5606 }
9ee6e8bb
PB
5607 /* Right shifts are encoded as N - shift, where N is the
5608 element size in bits. */
5609 if (op <= 4)
5610 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5611 if (size == 3) {
5612 count = q + 1;
5613 } else {
5614 count = q ? 4: 2;
5615 }
5616 switch (size) {
5617 case 0:
5618 imm = (uint8_t) shift;
5619 imm |= imm << 8;
5620 imm |= imm << 16;
5621 break;
5622 case 1:
5623 imm = (uint16_t) shift;
5624 imm |= imm << 16;
5625 break;
5626 case 2:
5627 case 3:
5628 imm = shift;
5629 break;
5630 default:
5631 abort();
5632 }
5633
5634 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5635 if (size == 3) {
5636 neon_load_reg64(cpu_V0, rm + pass);
5637 tcg_gen_movi_i64(cpu_V1, imm);
5638 switch (op) {
5639 case 0: /* VSHR */
5640 case 1: /* VSRA */
5641 if (u)
5642 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5643 else
ad69471c 5644 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5645 break;
ad69471c
PB
5646 case 2: /* VRSHR */
5647 case 3: /* VRSRA */
5648 if (u)
5649 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5650 else
ad69471c 5651 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5652 break;
ad69471c 5653 case 4: /* VSRI */
ad69471c
PB
5654 case 5: /* VSHL, VSLI */
5655 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5656 break;
0322b26e 5657 case 6: /* VQSHLU */
02da0b2d
PM
5658 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5659 cpu_V0, cpu_V1);
ad69471c 5660 break;
0322b26e
PM
5661 case 7: /* VQSHL */
5662 if (u) {
02da0b2d 5663 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5664 cpu_V0, cpu_V1);
5665 } else {
02da0b2d 5666 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5667 cpu_V0, cpu_V1);
5668 }
9ee6e8bb 5669 break;
9ee6e8bb 5670 }
ad69471c
PB
5671 if (op == 1 || op == 3) {
5672 /* Accumulate. */
5371cb81 5673 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5674 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5675 } else if (op == 4 || (op == 5 && u)) {
5676 /* Insert */
923e6509
CL
5677 neon_load_reg64(cpu_V1, rd + pass);
5678 uint64_t mask;
5679 if (shift < -63 || shift > 63) {
5680 mask = 0;
5681 } else {
5682 if (op == 4) {
5683 mask = 0xffffffffffffffffull >> -shift;
5684 } else {
5685 mask = 0xffffffffffffffffull << shift;
5686 }
5687 }
5688 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5689 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5690 }
5691 neon_store_reg64(cpu_V0, rd + pass);
5692 } else { /* size < 3 */
5693 /* Operands in T0 and T1. */
dd8fbd78 5694 tmp = neon_load_reg(rm, pass);
7d1b0095 5695 tmp2 = tcg_temp_new_i32();
dd8fbd78 5696 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5697 switch (op) {
5698 case 0: /* VSHR */
5699 case 1: /* VSRA */
5700 GEN_NEON_INTEGER_OP(shl);
5701 break;
5702 case 2: /* VRSHR */
5703 case 3: /* VRSRA */
5704 GEN_NEON_INTEGER_OP(rshl);
5705 break;
5706 case 4: /* VSRI */
ad69471c
PB
5707 case 5: /* VSHL, VSLI */
5708 switch (size) {
dd8fbd78
FN
5709 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5710 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5711 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5712 default: abort();
ad69471c
PB
5713 }
5714 break;
0322b26e 5715 case 6: /* VQSHLU */
ad69471c 5716 switch (size) {
0322b26e 5717 case 0:
02da0b2d
PM
5718 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5719 tmp, tmp2);
0322b26e
PM
5720 break;
5721 case 1:
02da0b2d
PM
5722 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5723 tmp, tmp2);
0322b26e
PM
5724 break;
5725 case 2:
02da0b2d
PM
5726 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5727 tmp, tmp2);
0322b26e
PM
5728 break;
5729 default:
cc13115b 5730 abort();
ad69471c
PB
5731 }
5732 break;
0322b26e 5733 case 7: /* VQSHL */
02da0b2d 5734 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5735 break;
ad69471c 5736 }
7d1b0095 5737 tcg_temp_free_i32(tmp2);
ad69471c
PB
5738
5739 if (op == 1 || op == 3) {
5740 /* Accumulate. */
dd8fbd78 5741 tmp2 = neon_load_reg(rd, pass);
5371cb81 5742 gen_neon_add(size, tmp, tmp2);
7d1b0095 5743 tcg_temp_free_i32(tmp2);
ad69471c
PB
5744 } else if (op == 4 || (op == 5 && u)) {
5745 /* Insert */
5746 switch (size) {
5747 case 0:
5748 if (op == 4)
ca9a32e4 5749 mask = 0xff >> -shift;
ad69471c 5750 else
ca9a32e4
JR
5751 mask = (uint8_t)(0xff << shift);
5752 mask |= mask << 8;
5753 mask |= mask << 16;
ad69471c
PB
5754 break;
5755 case 1:
5756 if (op == 4)
ca9a32e4 5757 mask = 0xffff >> -shift;
ad69471c 5758 else
ca9a32e4
JR
5759 mask = (uint16_t)(0xffff << shift);
5760 mask |= mask << 16;
ad69471c
PB
5761 break;
5762 case 2:
ca9a32e4
JR
5763 if (shift < -31 || shift > 31) {
5764 mask = 0;
5765 } else {
5766 if (op == 4)
5767 mask = 0xffffffffu >> -shift;
5768 else
5769 mask = 0xffffffffu << shift;
5770 }
ad69471c
PB
5771 break;
5772 default:
5773 abort();
5774 }
dd8fbd78 5775 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5776 tcg_gen_andi_i32(tmp, tmp, mask);
5777 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5778 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5779 tcg_temp_free_i32(tmp2);
ad69471c 5780 }
dd8fbd78 5781 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5782 }
5783 } /* for pass */
5784 } else if (op < 10) {
ad69471c 5785 /* Shift by immediate and narrow:
9ee6e8bb 5786 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5787 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5788 if (rm & 1) {
5789 return 1;
5790 }
9ee6e8bb
PB
5791 shift = shift - (1 << (size + 3));
5792 size++;
92cdfaeb 5793 if (size == 3) {
a7812ae4 5794 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5795 neon_load_reg64(cpu_V0, rm);
5796 neon_load_reg64(cpu_V1, rm + 1);
5797 for (pass = 0; pass < 2; pass++) {
5798 TCGv_i64 in;
5799 if (pass == 0) {
5800 in = cpu_V0;
5801 } else {
5802 in = cpu_V1;
5803 }
ad69471c 5804 if (q) {
0b36f4cd 5805 if (input_unsigned) {
92cdfaeb 5806 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5807 } else {
92cdfaeb 5808 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5809 }
ad69471c 5810 } else {
0b36f4cd 5811 if (input_unsigned) {
92cdfaeb 5812 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5813 } else {
92cdfaeb 5814 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5815 }
ad69471c 5816 }
7d1b0095 5817 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5818 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5819 neon_store_reg(rd, pass, tmp);
5820 } /* for pass */
5821 tcg_temp_free_i64(tmp64);
5822 } else {
5823 if (size == 1) {
5824 imm = (uint16_t)shift;
5825 imm |= imm << 16;
2c0262af 5826 } else {
92cdfaeb
PM
5827 /* size == 2 */
5828 imm = (uint32_t)shift;
5829 }
5830 tmp2 = tcg_const_i32(imm);
5831 tmp4 = neon_load_reg(rm + 1, 0);
5832 tmp5 = neon_load_reg(rm + 1, 1);
5833 for (pass = 0; pass < 2; pass++) {
5834 if (pass == 0) {
5835 tmp = neon_load_reg(rm, 0);
5836 } else {
5837 tmp = tmp4;
5838 }
0b36f4cd
CL
5839 gen_neon_shift_narrow(size, tmp, tmp2, q,
5840 input_unsigned);
92cdfaeb
PM
5841 if (pass == 0) {
5842 tmp3 = neon_load_reg(rm, 1);
5843 } else {
5844 tmp3 = tmp5;
5845 }
0b36f4cd
CL
5846 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5847 input_unsigned);
36aa55dc 5848 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5849 tcg_temp_free_i32(tmp);
5850 tcg_temp_free_i32(tmp3);
5851 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5852 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5853 neon_store_reg(rd, pass, tmp);
5854 } /* for pass */
c6067f04 5855 tcg_temp_free_i32(tmp2);
b75263d6 5856 }
9ee6e8bb 5857 } else if (op == 10) {
cc13115b
PM
5858 /* VSHLL, VMOVL */
5859 if (q || (rd & 1)) {
9ee6e8bb 5860 return 1;
cc13115b 5861 }
ad69471c
PB
5862 tmp = neon_load_reg(rm, 0);
5863 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5864 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5865 if (pass == 1)
5866 tmp = tmp2;
5867
5868 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5869
9ee6e8bb
PB
5870 if (shift != 0) {
5871 /* The shift is less than the width of the source
ad69471c
PB
5872 type, so we can just shift the whole register. */
5873 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5874 /* Widen the result of shift: we need to clear
5875 * the potential overflow bits resulting from
5876 * left bits of the narrow input appearing as
5877 * right bits of left the neighbour narrow
5878 * input. */
ad69471c
PB
5879 if (size < 2 || !u) {
5880 uint64_t imm64;
5881 if (size == 0) {
5882 imm = (0xffu >> (8 - shift));
5883 imm |= imm << 16;
acdf01ef 5884 } else if (size == 1) {
ad69471c 5885 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5886 } else {
5887 /* size == 2 */
5888 imm = 0xffffffff >> (32 - shift);
5889 }
5890 if (size < 2) {
5891 imm64 = imm | (((uint64_t)imm) << 32);
5892 } else {
5893 imm64 = imm;
9ee6e8bb 5894 }
acdf01ef 5895 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5896 }
5897 }
ad69471c 5898 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5899 }
f73534a5 5900 } else if (op >= 14) {
9ee6e8bb 5901 /* VCVT fixed-point. */
cc13115b
PM
5902 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5903 return 1;
5904 }
f73534a5
PM
5905 /* We have already masked out the must-be-1 top bit of imm6,
5906 * hence this 32-shift where the ARM ARM has 64-imm6.
5907 */
5908 shift = 32 - shift;
9ee6e8bb 5909 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5910 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5911 if (!(op & 1)) {
9ee6e8bb 5912 if (u)
5500b06c 5913 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5914 else
5500b06c 5915 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5916 } else {
5917 if (u)
5500b06c 5918 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5919 else
5500b06c 5920 gen_vfp_tosl(0, shift, 1);
2c0262af 5921 }
4373f3ce 5922 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5923 }
5924 } else {
9ee6e8bb
PB
5925 return 1;
5926 }
5927 } else { /* (insn & 0x00380080) == 0 */
5928 int invert;
7d80fee5
PM
5929 if (q && (rd & 1)) {
5930 return 1;
5931 }
9ee6e8bb
PB
5932
5933 op = (insn >> 8) & 0xf;
5934 /* One register and immediate. */
5935 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5936 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5937 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5938 * We choose to not special-case this and will behave as if a
5939 * valid constant encoding of 0 had been given.
5940 */
9ee6e8bb
PB
5941 switch (op) {
5942 case 0: case 1:
5943 /* no-op */
5944 break;
5945 case 2: case 3:
5946 imm <<= 8;
5947 break;
5948 case 4: case 5:
5949 imm <<= 16;
5950 break;
5951 case 6: case 7:
5952 imm <<= 24;
5953 break;
5954 case 8: case 9:
5955 imm |= imm << 16;
5956 break;
5957 case 10: case 11:
5958 imm = (imm << 8) | (imm << 24);
5959 break;
5960 case 12:
8e31209e 5961 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5962 break;
5963 case 13:
5964 imm = (imm << 16) | 0xffff;
5965 break;
5966 case 14:
5967 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5968 if (invert)
5969 imm = ~imm;
5970 break;
5971 case 15:
7d80fee5
PM
5972 if (invert) {
5973 return 1;
5974 }
9ee6e8bb
PB
5975 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5976 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5977 break;
5978 }
5979 if (invert)
5980 imm = ~imm;
5981
9ee6e8bb
PB
5982 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5983 if (op & 1 && op < 12) {
ad69471c 5984 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5985 if (invert) {
5986 /* The immediate value has already been inverted, so
5987 BIC becomes AND. */
ad69471c 5988 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5989 } else {
ad69471c 5990 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5991 }
9ee6e8bb 5992 } else {
ad69471c 5993 /* VMOV, VMVN. */
7d1b0095 5994 tmp = tcg_temp_new_i32();
9ee6e8bb 5995 if (op == 14 && invert) {
a5a14945 5996 int n;
ad69471c
PB
5997 uint32_t val;
5998 val = 0;
9ee6e8bb
PB
5999 for (n = 0; n < 4; n++) {
6000 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6001 val |= 0xff << (n * 8);
9ee6e8bb 6002 }
ad69471c
PB
6003 tcg_gen_movi_i32(tmp, val);
6004 } else {
6005 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6006 }
9ee6e8bb 6007 }
ad69471c 6008 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6009 }
6010 }
e4b3861d 6011 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6012 if (size != 3) {
6013 op = (insn >> 8) & 0xf;
6014 if ((insn & (1 << 6)) == 0) {
6015 /* Three registers of different lengths. */
6016 int src1_wide;
6017 int src2_wide;
6018 int prewiden;
526d0096
PM
6019 /* undefreq: bit 0 : UNDEF if size == 0
6020 * bit 1 : UNDEF if size == 1
6021 * bit 2 : UNDEF if size == 2
6022 * bit 3 : UNDEF if U == 1
6023 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6024 */
6025 int undefreq;
6026 /* prewiden, src1_wide, src2_wide, undefreq */
6027 static const int neon_3reg_wide[16][4] = {
6028 {1, 0, 0, 0}, /* VADDL */
6029 {1, 1, 0, 0}, /* VADDW */
6030 {1, 0, 0, 0}, /* VSUBL */
6031 {1, 1, 0, 0}, /* VSUBW */
6032 {0, 1, 1, 0}, /* VADDHN */
6033 {0, 0, 0, 0}, /* VABAL */
6034 {0, 1, 1, 0}, /* VSUBHN */
6035 {0, 0, 0, 0}, /* VABDL */
6036 {0, 0, 0, 0}, /* VMLAL */
526d0096 6037 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6038 {0, 0, 0, 0}, /* VMLSL */
526d0096 6039 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6040 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6041 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6042 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6043 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6044 };
6045
6046 prewiden = neon_3reg_wide[op][0];
6047 src1_wide = neon_3reg_wide[op][1];
6048 src2_wide = neon_3reg_wide[op][2];
695272dc 6049 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6050
526d0096
PM
6051 if ((undefreq & (1 << size)) ||
6052 ((undefreq & 8) && u)) {
695272dc
PM
6053 return 1;
6054 }
6055 if ((src1_wide && (rn & 1)) ||
6056 (src2_wide && (rm & 1)) ||
6057 (!src2_wide && (rd & 1))) {
ad69471c 6058 return 1;
695272dc 6059 }
ad69471c 6060
4e624eda
PM
6061 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6062 * outside the loop below as it only performs a single pass.
6063 */
6064 if (op == 14 && size == 2) {
6065 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6066
6067 if (!arm_feature(env, ARM_FEATURE_V8_PMULL)) {
6068 return 1;
6069 }
6070 tcg_rn = tcg_temp_new_i64();
6071 tcg_rm = tcg_temp_new_i64();
6072 tcg_rd = tcg_temp_new_i64();
6073 neon_load_reg64(tcg_rn, rn);
6074 neon_load_reg64(tcg_rm, rm);
6075 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6076 neon_store_reg64(tcg_rd, rd);
6077 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6078 neon_store_reg64(tcg_rd, rd + 1);
6079 tcg_temp_free_i64(tcg_rn);
6080 tcg_temp_free_i64(tcg_rm);
6081 tcg_temp_free_i64(tcg_rd);
6082 return 0;
6083 }
6084
9ee6e8bb
PB
6085 /* Avoid overlapping operands. Wide source operands are
6086 always aligned so will never overlap with wide
6087 destinations in problematic ways. */
8f8e3aa4 6088 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6089 tmp = neon_load_reg(rm, 1);
6090 neon_store_scratch(2, tmp);
8f8e3aa4 6091 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6092 tmp = neon_load_reg(rn, 1);
6093 neon_store_scratch(2, tmp);
9ee6e8bb 6094 }
39d5492a 6095 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6096 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6097 if (src1_wide) {
6098 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6099 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6100 } else {
ad69471c 6101 if (pass == 1 && rd == rn) {
dd8fbd78 6102 tmp = neon_load_scratch(2);
9ee6e8bb 6103 } else {
ad69471c
PB
6104 tmp = neon_load_reg(rn, pass);
6105 }
6106 if (prewiden) {
6107 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6108 }
6109 }
ad69471c
PB
6110 if (src2_wide) {
6111 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6112 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6113 } else {
ad69471c 6114 if (pass == 1 && rd == rm) {
dd8fbd78 6115 tmp2 = neon_load_scratch(2);
9ee6e8bb 6116 } else {
ad69471c
PB
6117 tmp2 = neon_load_reg(rm, pass);
6118 }
6119 if (prewiden) {
6120 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6121 }
9ee6e8bb
PB
6122 }
6123 switch (op) {
6124 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6125 gen_neon_addl(size);
9ee6e8bb 6126 break;
79b0e534 6127 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6128 gen_neon_subl(size);
9ee6e8bb
PB
6129 break;
6130 case 5: case 7: /* VABAL, VABDL */
6131 switch ((size << 1) | u) {
ad69471c
PB
6132 case 0:
6133 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6134 break;
6135 case 1:
6136 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6137 break;
6138 case 2:
6139 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6140 break;
6141 case 3:
6142 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6143 break;
6144 case 4:
6145 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6146 break;
6147 case 5:
6148 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6149 break;
9ee6e8bb
PB
6150 default: abort();
6151 }
7d1b0095
PM
6152 tcg_temp_free_i32(tmp2);
6153 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6154 break;
6155 case 8: case 9: case 10: case 11: case 12: case 13:
6156 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6157 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6158 break;
6159 case 14: /* Polynomial VMULL */
e5ca24cb 6160 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6161 tcg_temp_free_i32(tmp2);
6162 tcg_temp_free_i32(tmp);
e5ca24cb 6163 break;
695272dc
PM
6164 default: /* 15 is RESERVED: caught earlier */
6165 abort();
9ee6e8bb 6166 }
ebcd88ce
PM
6167 if (op == 13) {
6168 /* VQDMULL */
6169 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6170 neon_store_reg64(cpu_V0, rd + pass);
6171 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6172 /* Accumulate. */
ebcd88ce 6173 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6174 switch (op) {
4dc064e6
PM
6175 case 10: /* VMLSL */
6176 gen_neon_negl(cpu_V0, size);
6177 /* Fall through */
6178 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6179 gen_neon_addl(size);
9ee6e8bb
PB
6180 break;
6181 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6182 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6183 if (op == 11) {
6184 gen_neon_negl(cpu_V0, size);
6185 }
ad69471c
PB
6186 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6187 break;
9ee6e8bb
PB
6188 default:
6189 abort();
6190 }
ad69471c 6191 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6192 } else if (op == 4 || op == 6) {
6193 /* Narrowing operation. */
7d1b0095 6194 tmp = tcg_temp_new_i32();
79b0e534 6195 if (!u) {
9ee6e8bb 6196 switch (size) {
ad69471c
PB
6197 case 0:
6198 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6199 break;
6200 case 1:
6201 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6202 break;
6203 case 2:
6204 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6205 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6206 break;
9ee6e8bb
PB
6207 default: abort();
6208 }
6209 } else {
6210 switch (size) {
ad69471c
PB
6211 case 0:
6212 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6213 break;
6214 case 1:
6215 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6216 break;
6217 case 2:
6218 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6219 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6220 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6221 break;
9ee6e8bb
PB
6222 default: abort();
6223 }
6224 }
ad69471c
PB
6225 if (pass == 0) {
6226 tmp3 = tmp;
6227 } else {
6228 neon_store_reg(rd, 0, tmp3);
6229 neon_store_reg(rd, 1, tmp);
6230 }
9ee6e8bb
PB
6231 } else {
6232 /* Write back the result. */
ad69471c 6233 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6234 }
6235 }
6236 } else {
3e3326df
PM
6237 /* Two registers and a scalar. NB that for ops of this form
6238 * the ARM ARM labels bit 24 as Q, but it is in our variable
6239 * 'u', not 'q'.
6240 */
6241 if (size == 0) {
6242 return 1;
6243 }
9ee6e8bb 6244 switch (op) {
9ee6e8bb 6245 case 1: /* Float VMLA scalar */
9ee6e8bb 6246 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6247 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6248 if (size == 1) {
6249 return 1;
6250 }
6251 /* fall through */
6252 case 0: /* Integer VMLA scalar */
6253 case 4: /* Integer VMLS scalar */
6254 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6255 case 12: /* VQDMULH scalar */
6256 case 13: /* VQRDMULH scalar */
3e3326df
PM
6257 if (u && ((rd | rn) & 1)) {
6258 return 1;
6259 }
dd8fbd78
FN
6260 tmp = neon_get_scalar(size, rm);
6261 neon_store_scratch(0, tmp);
9ee6e8bb 6262 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6263 tmp = neon_load_scratch(0);
6264 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6265 if (op == 12) {
6266 if (size == 1) {
02da0b2d 6267 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6268 } else {
02da0b2d 6269 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6270 }
6271 } else if (op == 13) {
6272 if (size == 1) {
02da0b2d 6273 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6274 } else {
02da0b2d 6275 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6276 }
6277 } else if (op & 1) {
aa47cfdd
PM
6278 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6279 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6280 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6281 } else {
6282 switch (size) {
dd8fbd78
FN
6283 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6284 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6285 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6286 default: abort();
9ee6e8bb
PB
6287 }
6288 }
7d1b0095 6289 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6290 if (op < 8) {
6291 /* Accumulate. */
dd8fbd78 6292 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6293 switch (op) {
6294 case 0:
dd8fbd78 6295 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6296 break;
6297 case 1:
aa47cfdd
PM
6298 {
6299 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6300 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6301 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6302 break;
aa47cfdd 6303 }
9ee6e8bb 6304 case 4:
dd8fbd78 6305 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6306 break;
6307 case 5:
aa47cfdd
PM
6308 {
6309 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6310 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6311 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6312 break;
aa47cfdd 6313 }
9ee6e8bb
PB
6314 default:
6315 abort();
6316 }
7d1b0095 6317 tcg_temp_free_i32(tmp2);
9ee6e8bb 6318 }
dd8fbd78 6319 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6320 }
6321 break;
9ee6e8bb 6322 case 3: /* VQDMLAL scalar */
9ee6e8bb 6323 case 7: /* VQDMLSL scalar */
9ee6e8bb 6324 case 11: /* VQDMULL scalar */
3e3326df 6325 if (u == 1) {
ad69471c 6326 return 1;
3e3326df
PM
6327 }
6328 /* fall through */
6329 case 2: /* VMLAL sclar */
6330 case 6: /* VMLSL scalar */
6331 case 10: /* VMULL scalar */
6332 if (rd & 1) {
6333 return 1;
6334 }
dd8fbd78 6335 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6336 /* We need a copy of tmp2 because gen_neon_mull
6337 * deletes it during pass 0. */
7d1b0095 6338 tmp4 = tcg_temp_new_i32();
c6067f04 6339 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6340 tmp3 = neon_load_reg(rn, 1);
ad69471c 6341
9ee6e8bb 6342 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6343 if (pass == 0) {
6344 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6345 } else {
dd8fbd78 6346 tmp = tmp3;
c6067f04 6347 tmp2 = tmp4;
9ee6e8bb 6348 }
ad69471c 6349 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6350 if (op != 11) {
6351 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6352 }
9ee6e8bb 6353 switch (op) {
4dc064e6
PM
6354 case 6:
6355 gen_neon_negl(cpu_V0, size);
6356 /* Fall through */
6357 case 2:
ad69471c 6358 gen_neon_addl(size);
9ee6e8bb
PB
6359 break;
6360 case 3: case 7:
ad69471c 6361 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6362 if (op == 7) {
6363 gen_neon_negl(cpu_V0, size);
6364 }
ad69471c 6365 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6366 break;
6367 case 10:
6368 /* no-op */
6369 break;
6370 case 11:
ad69471c 6371 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6372 break;
6373 default:
6374 abort();
6375 }
ad69471c 6376 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6377 }
dd8fbd78 6378
dd8fbd78 6379
9ee6e8bb
PB
6380 break;
6381 default: /* 14 and 15 are RESERVED */
6382 return 1;
6383 }
6384 }
6385 } else { /* size == 3 */
6386 if (!u) {
6387 /* Extract. */
9ee6e8bb 6388 imm = (insn >> 8) & 0xf;
ad69471c
PB
6389
6390 if (imm > 7 && !q)
6391 return 1;
6392
52579ea1
PM
6393 if (q && ((rd | rn | rm) & 1)) {
6394 return 1;
6395 }
6396
ad69471c
PB
6397 if (imm == 0) {
6398 neon_load_reg64(cpu_V0, rn);
6399 if (q) {
6400 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6401 }
ad69471c
PB
6402 } else if (imm == 8) {
6403 neon_load_reg64(cpu_V0, rn + 1);
6404 if (q) {
6405 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6406 }
ad69471c 6407 } else if (q) {
a7812ae4 6408 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6409 if (imm < 8) {
6410 neon_load_reg64(cpu_V0, rn);
a7812ae4 6411 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6412 } else {
6413 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6414 neon_load_reg64(tmp64, rm);
ad69471c
PB
6415 }
6416 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6417 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6418 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6419 if (imm < 8) {
6420 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6421 } else {
ad69471c
PB
6422 neon_load_reg64(cpu_V1, rm + 1);
6423 imm -= 8;
9ee6e8bb 6424 }
ad69471c 6425 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6426 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6427 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6428 tcg_temp_free_i64(tmp64);
ad69471c 6429 } else {
a7812ae4 6430 /* BUGFIX */
ad69471c 6431 neon_load_reg64(cpu_V0, rn);
a7812ae4 6432 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6433 neon_load_reg64(cpu_V1, rm);
a7812ae4 6434 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6435 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6436 }
6437 neon_store_reg64(cpu_V0, rd);
6438 if (q) {
6439 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6440 }
6441 } else if ((insn & (1 << 11)) == 0) {
6442 /* Two register misc. */
6443 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6444 size = (insn >> 18) & 3;
600b828c
PM
6445 /* UNDEF for unknown op values and bad op-size combinations */
6446 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6447 return 1;
6448 }
fc2a9b37
PM
6449 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6450 q && ((rm | rd) & 1)) {
6451 return 1;
6452 }
9ee6e8bb 6453 switch (op) {
600b828c 6454 case NEON_2RM_VREV64:
9ee6e8bb 6455 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6456 tmp = neon_load_reg(rm, pass * 2);
6457 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6458 switch (size) {
dd8fbd78
FN
6459 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6460 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6461 case 2: /* no-op */ break;
6462 default: abort();
6463 }
dd8fbd78 6464 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6465 if (size == 2) {
dd8fbd78 6466 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6467 } else {
9ee6e8bb 6468 switch (size) {
dd8fbd78
FN
6469 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6470 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6471 default: abort();
6472 }
dd8fbd78 6473 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6474 }
6475 }
6476 break;
600b828c
PM
6477 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6478 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6479 for (pass = 0; pass < q + 1; pass++) {
6480 tmp = neon_load_reg(rm, pass * 2);
6481 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6482 tmp = neon_load_reg(rm, pass * 2 + 1);
6483 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6484 switch (size) {
6485 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6486 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6487 case 2: tcg_gen_add_i64(CPU_V001); break;
6488 default: abort();
6489 }
600b828c 6490 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6491 /* Accumulate. */
ad69471c
PB
6492 neon_load_reg64(cpu_V1, rd + pass);
6493 gen_neon_addl(size);
9ee6e8bb 6494 }
ad69471c 6495 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6496 }
6497 break;
600b828c 6498 case NEON_2RM_VTRN:
9ee6e8bb 6499 if (size == 2) {
a5a14945 6500 int n;
9ee6e8bb 6501 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6502 tmp = neon_load_reg(rm, n);
6503 tmp2 = neon_load_reg(rd, n + 1);
6504 neon_store_reg(rm, n, tmp2);
6505 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6506 }
6507 } else {
6508 goto elementwise;
6509 }
6510 break;
600b828c 6511 case NEON_2RM_VUZP:
02acedf9 6512 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6513 return 1;
9ee6e8bb
PB
6514 }
6515 break;
600b828c 6516 case NEON_2RM_VZIP:
d68a6f3a 6517 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6518 return 1;
9ee6e8bb
PB
6519 }
6520 break;
600b828c
PM
6521 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6522 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6523 if (rm & 1) {
6524 return 1;
6525 }
39d5492a 6526 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6527 for (pass = 0; pass < 2; pass++) {
ad69471c 6528 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6529 tmp = tcg_temp_new_i32();
600b828c
PM
6530 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6531 tmp, cpu_V0);
ad69471c
PB
6532 if (pass == 0) {
6533 tmp2 = tmp;
6534 } else {
6535 neon_store_reg(rd, 0, tmp2);
6536 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6537 }
9ee6e8bb
PB
6538 }
6539 break;
600b828c 6540 case NEON_2RM_VSHLL:
fc2a9b37 6541 if (q || (rd & 1)) {
9ee6e8bb 6542 return 1;
600b828c 6543 }
ad69471c
PB
6544 tmp = neon_load_reg(rm, 0);
6545 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6546 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6547 if (pass == 1)
6548 tmp = tmp2;
6549 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6550 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6551 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6552 }
6553 break;
600b828c 6554 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
6555 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6556 q || (rm & 1)) {
6557 return 1;
6558 }
7d1b0095
PM
6559 tmp = tcg_temp_new_i32();
6560 tmp2 = tcg_temp_new_i32();
60011498 6561 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6562 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6563 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6564 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6565 tcg_gen_shli_i32(tmp2, tmp2, 16);
6566 tcg_gen_or_i32(tmp2, tmp2, tmp);
6567 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6568 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6569 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6570 neon_store_reg(rd, 0, tmp2);
7d1b0095 6571 tmp2 = tcg_temp_new_i32();
2d981da7 6572 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6573 tcg_gen_shli_i32(tmp2, tmp2, 16);
6574 tcg_gen_or_i32(tmp2, tmp2, tmp);
6575 neon_store_reg(rd, 1, tmp2);
7d1b0095 6576 tcg_temp_free_i32(tmp);
60011498 6577 break;
600b828c 6578 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
6579 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
6580 q || (rd & 1)) {
6581 return 1;
6582 }
7d1b0095 6583 tmp3 = tcg_temp_new_i32();
60011498
PB
6584 tmp = neon_load_reg(rm, 0);
6585 tmp2 = neon_load_reg(rm, 1);
6586 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6587 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6588 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6589 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6590 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6591 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6592 tcg_temp_free_i32(tmp);
60011498 6593 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6594 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6595 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6596 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6597 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6598 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6599 tcg_temp_free_i32(tmp2);
6600 tcg_temp_free_i32(tmp3);
60011498 6601 break;
9d935509
AB
6602 case NEON_2RM_AESE: case NEON_2RM_AESMC:
6603 if (!arm_feature(env, ARM_FEATURE_V8_AES)
6604 || ((rm | rd) & 1)) {
6605 return 1;
6606 }
6607 tmp = tcg_const_i32(rd);
6608 tmp2 = tcg_const_i32(rm);
6609
6610 /* Bit 6 is the lowest opcode bit; it distinguishes between
6611 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6612 */
6613 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6614
6615 if (op == NEON_2RM_AESE) {
6616 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6617 } else {
6618 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6619 }
6620 tcg_temp_free_i32(tmp);
6621 tcg_temp_free_i32(tmp2);
6622 tcg_temp_free_i32(tmp3);
6623 break;
f1ecb913
AB
6624 case NEON_2RM_SHA1H:
6625 if (!arm_feature(env, ARM_FEATURE_V8_SHA1)
6626 || ((rm | rd) & 1)) {
6627 return 1;
6628 }
6629 tmp = tcg_const_i32(rd);
6630 tmp2 = tcg_const_i32(rm);
6631
6632 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6633
6634 tcg_temp_free_i32(tmp);
6635 tcg_temp_free_i32(tmp2);
6636 break;
6637 case NEON_2RM_SHA1SU1:
6638 if ((rm | rd) & 1) {
6639 return 1;
6640 }
6641 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6642 if (q) {
6643 if (!arm_feature(env, ARM_FEATURE_V8_SHA256)) {
6644 return 1;
6645 }
6646 } else if (!arm_feature(env, ARM_FEATURE_V8_SHA1)) {
6647 return 1;
6648 }
6649 tmp = tcg_const_i32(rd);
6650 tmp2 = tcg_const_i32(rm);
6651 if (q) {
6652 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6653 } else {
6654 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6655 }
6656 tcg_temp_free_i32(tmp);
6657 tcg_temp_free_i32(tmp2);
6658 break;
9ee6e8bb
PB
6659 default:
6660 elementwise:
6661 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6662 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6663 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6664 neon_reg_offset(rm, pass));
39d5492a 6665 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6666 } else {
dd8fbd78 6667 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6668 }
6669 switch (op) {
600b828c 6670 case NEON_2RM_VREV32:
9ee6e8bb 6671 switch (size) {
dd8fbd78
FN
6672 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6673 case 1: gen_swap_half(tmp); break;
600b828c 6674 default: abort();
9ee6e8bb
PB
6675 }
6676 break;
600b828c 6677 case NEON_2RM_VREV16:
dd8fbd78 6678 gen_rev16(tmp);
9ee6e8bb 6679 break;
600b828c 6680 case NEON_2RM_VCLS:
9ee6e8bb 6681 switch (size) {
dd8fbd78
FN
6682 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6683 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6684 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6685 default: abort();
9ee6e8bb
PB
6686 }
6687 break;
600b828c 6688 case NEON_2RM_VCLZ:
9ee6e8bb 6689 switch (size) {
dd8fbd78
FN
6690 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6691 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6692 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6693 default: abort();
9ee6e8bb
PB
6694 }
6695 break;
600b828c 6696 case NEON_2RM_VCNT:
dd8fbd78 6697 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6698 break;
600b828c 6699 case NEON_2RM_VMVN:
dd8fbd78 6700 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6701 break;
600b828c 6702 case NEON_2RM_VQABS:
9ee6e8bb 6703 switch (size) {
02da0b2d
PM
6704 case 0:
6705 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6706 break;
6707 case 1:
6708 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6709 break;
6710 case 2:
6711 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6712 break;
600b828c 6713 default: abort();
9ee6e8bb
PB
6714 }
6715 break;
600b828c 6716 case NEON_2RM_VQNEG:
9ee6e8bb 6717 switch (size) {
02da0b2d
PM
6718 case 0:
6719 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6720 break;
6721 case 1:
6722 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6723 break;
6724 case 2:
6725 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6726 break;
600b828c 6727 default: abort();
9ee6e8bb
PB
6728 }
6729 break;
600b828c 6730 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6731 tmp2 = tcg_const_i32(0);
9ee6e8bb 6732 switch(size) {
dd8fbd78
FN
6733 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6734 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6735 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6736 default: abort();
9ee6e8bb 6737 }
39d5492a 6738 tcg_temp_free_i32(tmp2);
600b828c 6739 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6740 tcg_gen_not_i32(tmp, tmp);
600b828c 6741 }
9ee6e8bb 6742 break;
600b828c 6743 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6744 tmp2 = tcg_const_i32(0);
9ee6e8bb 6745 switch(size) {
dd8fbd78
FN
6746 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6747 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6748 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6749 default: abort();
9ee6e8bb 6750 }
39d5492a 6751 tcg_temp_free_i32(tmp2);
600b828c 6752 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6753 tcg_gen_not_i32(tmp, tmp);
600b828c 6754 }
9ee6e8bb 6755 break;
600b828c 6756 case NEON_2RM_VCEQ0:
dd8fbd78 6757 tmp2 = tcg_const_i32(0);
9ee6e8bb 6758 switch(size) {
dd8fbd78
FN
6759 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6760 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6761 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6762 default: abort();
9ee6e8bb 6763 }
39d5492a 6764 tcg_temp_free_i32(tmp2);
9ee6e8bb 6765 break;
600b828c 6766 case NEON_2RM_VABS:
9ee6e8bb 6767 switch(size) {
dd8fbd78
FN
6768 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6769 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6770 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6771 default: abort();
9ee6e8bb
PB
6772 }
6773 break;
600b828c 6774 case NEON_2RM_VNEG:
dd8fbd78
FN
6775 tmp2 = tcg_const_i32(0);
6776 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6777 tcg_temp_free_i32(tmp2);
9ee6e8bb 6778 break;
600b828c 6779 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6780 {
6781 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6782 tmp2 = tcg_const_i32(0);
aa47cfdd 6783 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6784 tcg_temp_free_i32(tmp2);
aa47cfdd 6785 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6786 break;
aa47cfdd 6787 }
600b828c 6788 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6789 {
6790 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6791 tmp2 = tcg_const_i32(0);
aa47cfdd 6792 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6793 tcg_temp_free_i32(tmp2);
aa47cfdd 6794 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6795 break;
aa47cfdd 6796 }
600b828c 6797 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6798 {
6799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6800 tmp2 = tcg_const_i32(0);
aa47cfdd 6801 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6802 tcg_temp_free_i32(tmp2);
aa47cfdd 6803 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6804 break;
aa47cfdd 6805 }
600b828c 6806 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6807 {
6808 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6809 tmp2 = tcg_const_i32(0);
aa47cfdd 6810 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6811 tcg_temp_free_i32(tmp2);
aa47cfdd 6812 tcg_temp_free_ptr(fpstatus);
0e326109 6813 break;
aa47cfdd 6814 }
600b828c 6815 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6816 {
6817 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6818 tmp2 = tcg_const_i32(0);
aa47cfdd 6819 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6820 tcg_temp_free_i32(tmp2);
aa47cfdd 6821 tcg_temp_free_ptr(fpstatus);
0e326109 6822 break;
aa47cfdd 6823 }
600b828c 6824 case NEON_2RM_VABS_F:
4373f3ce 6825 gen_vfp_abs(0);
9ee6e8bb 6826 break;
600b828c 6827 case NEON_2RM_VNEG_F:
4373f3ce 6828 gen_vfp_neg(0);
9ee6e8bb 6829 break;
600b828c 6830 case NEON_2RM_VSWP:
dd8fbd78
FN
6831 tmp2 = neon_load_reg(rd, pass);
6832 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6833 break;
600b828c 6834 case NEON_2RM_VTRN:
dd8fbd78 6835 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6836 switch (size) {
dd8fbd78
FN
6837 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6838 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6839 default: abort();
9ee6e8bb 6840 }
dd8fbd78 6841 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6842 break;
34f7b0a2
WN
6843 case NEON_2RM_VRINTN:
6844 case NEON_2RM_VRINTA:
6845 case NEON_2RM_VRINTM:
6846 case NEON_2RM_VRINTP:
6847 case NEON_2RM_VRINTZ:
6848 {
6849 TCGv_i32 tcg_rmode;
6850 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6851 int rmode;
6852
6853 if (op == NEON_2RM_VRINTZ) {
6854 rmode = FPROUNDING_ZERO;
6855 } else {
6856 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6857 }
6858
6859 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6860 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6861 cpu_env);
6862 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6863 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6864 cpu_env);
6865 tcg_temp_free_ptr(fpstatus);
6866 tcg_temp_free_i32(tcg_rmode);
6867 break;
6868 }
2ce70625
WN
6869 case NEON_2RM_VRINTX:
6870 {
6871 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6872 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6873 tcg_temp_free_ptr(fpstatus);
6874 break;
6875 }
901ad525
WN
6876 case NEON_2RM_VCVTAU:
6877 case NEON_2RM_VCVTAS:
6878 case NEON_2RM_VCVTNU:
6879 case NEON_2RM_VCVTNS:
6880 case NEON_2RM_VCVTPU:
6881 case NEON_2RM_VCVTPS:
6882 case NEON_2RM_VCVTMU:
6883 case NEON_2RM_VCVTMS:
6884 {
6885 bool is_signed = !extract32(insn, 7, 1);
6886 TCGv_ptr fpst = get_fpstatus_ptr(1);
6887 TCGv_i32 tcg_rmode, tcg_shift;
6888 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6889
6890 tcg_shift = tcg_const_i32(0);
6891 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6892 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6893 cpu_env);
6894
6895 if (is_signed) {
6896 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6897 tcg_shift, fpst);
6898 } else {
6899 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6900 tcg_shift, fpst);
6901 }
6902
6903 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6904 cpu_env);
6905 tcg_temp_free_i32(tcg_rmode);
6906 tcg_temp_free_i32(tcg_shift);
6907 tcg_temp_free_ptr(fpst);
6908 break;
6909 }
600b828c 6910 case NEON_2RM_VRECPE:
b6d4443a
AB
6911 {
6912 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6913 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6914 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6915 break;
b6d4443a 6916 }
600b828c 6917 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6918 {
6919 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6920 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6921 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6922 break;
c2fb418e 6923 }
600b828c 6924 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6925 {
6926 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6927 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6928 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6929 break;
b6d4443a 6930 }
600b828c 6931 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6932 {
6933 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6934 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6935 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6936 break;
c2fb418e 6937 }
600b828c 6938 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6939 gen_vfp_sito(0, 1);
9ee6e8bb 6940 break;
600b828c 6941 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6942 gen_vfp_uito(0, 1);
9ee6e8bb 6943 break;
600b828c 6944 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6945 gen_vfp_tosiz(0, 1);
9ee6e8bb 6946 break;
600b828c 6947 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6948 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6949 break;
6950 default:
600b828c
PM
6951 /* Reserved op values were caught by the
6952 * neon_2rm_sizes[] check earlier.
6953 */
6954 abort();
9ee6e8bb 6955 }
600b828c 6956 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6957 tcg_gen_st_f32(cpu_F0s, cpu_env,
6958 neon_reg_offset(rd, pass));
9ee6e8bb 6959 } else {
dd8fbd78 6960 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6961 }
6962 }
6963 break;
6964 }
6965 } else if ((insn & (1 << 10)) == 0) {
6966 /* VTBL, VTBX. */
56907d77
PM
6967 int n = ((insn >> 8) & 3) + 1;
6968 if ((rn + n) > 32) {
6969 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6970 * helper function running off the end of the register file.
6971 */
6972 return 1;
6973 }
6974 n <<= 3;
9ee6e8bb 6975 if (insn & (1 << 6)) {
8f8e3aa4 6976 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6977 } else {
7d1b0095 6978 tmp = tcg_temp_new_i32();
8f8e3aa4 6979 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6980 }
8f8e3aa4 6981 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6982 tmp4 = tcg_const_i32(rn);
6983 tmp5 = tcg_const_i32(n);
9ef39277 6984 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6985 tcg_temp_free_i32(tmp);
9ee6e8bb 6986 if (insn & (1 << 6)) {
8f8e3aa4 6987 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6988 } else {
7d1b0095 6989 tmp = tcg_temp_new_i32();
8f8e3aa4 6990 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6991 }
8f8e3aa4 6992 tmp3 = neon_load_reg(rm, 1);
9ef39277 6993 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6994 tcg_temp_free_i32(tmp5);
6995 tcg_temp_free_i32(tmp4);
8f8e3aa4 6996 neon_store_reg(rd, 0, tmp2);
3018f259 6997 neon_store_reg(rd, 1, tmp3);
7d1b0095 6998 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6999 } else if ((insn & 0x380) == 0) {
7000 /* VDUP */
133da6aa
JR
7001 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7002 return 1;
7003 }
9ee6e8bb 7004 if (insn & (1 << 19)) {
dd8fbd78 7005 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7006 } else {
dd8fbd78 7007 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7008 }
7009 if (insn & (1 << 16)) {
dd8fbd78 7010 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7011 } else if (insn & (1 << 17)) {
7012 if ((insn >> 18) & 1)
dd8fbd78 7013 gen_neon_dup_high16(tmp);
9ee6e8bb 7014 else
dd8fbd78 7015 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7016 }
7017 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7018 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7019 tcg_gen_mov_i32(tmp2, tmp);
7020 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7021 }
7d1b0095 7022 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7023 } else {
7024 return 1;
7025 }
7026 }
7027 }
7028 return 0;
7029}
7030
0ecb72a5 7031static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 7032{
4b6a83fb
PM
7033 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7034 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7035
7036 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7037
7038 /* First check for coprocessor space used for XScale/iwMMXt insns */
7039 if (arm_feature(env, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
7040 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7041 return 1;
7042 }
7043 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7044 return disas_iwmmxt_insn(env, s, insn);
7045 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7046 return disas_dsp_insn(env, s, insn);
7047 }
7048 return 1;
4b6a83fb
PM
7049 }
7050
7051 /* Otherwise treat as a generic register access */
7052 is64 = (insn & (1 << 25)) == 0;
7053 if (!is64 && ((insn & (1 << 4)) == 0)) {
7054 /* cdp */
7055 return 1;
7056 }
7057
7058 crm = insn & 0xf;
7059 if (is64) {
7060 crn = 0;
7061 opc1 = (insn >> 4) & 0xf;
7062 opc2 = 0;
7063 rt2 = (insn >> 16) & 0xf;
7064 } else {
7065 crn = (insn >> 16) & 0xf;
7066 opc1 = (insn >> 21) & 7;
7067 opc2 = (insn >> 5) & 7;
7068 rt2 = 0;
7069 }
7070 isread = (insn >> 20) & 1;
7071 rt = (insn >> 12) & 0xf;
7072
60322b39 7073 ri = get_arm_cp_reginfo(s->cp_regs,
4b6a83fb
PM
7074 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
7075 if (ri) {
7076 /* Check access permissions */
60322b39 7077 if (!cp_access_ok(s->current_pl, ri, isread)) {
4b6a83fb
PM
7078 return 1;
7079 }
7080
c0f4af17
PM
7081 if (ri->accessfn ||
7082 (arm_feature(env, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7083 /* Emit code to perform further access permissions checks at
7084 * runtime; this may result in an exception.
c0f4af17
PM
7085 * Note that on XScale all cp0..c13 registers do an access check
7086 * call in order to handle c15_cpar.
f59df3f2
PM
7087 */
7088 TCGv_ptr tmpptr;
8bcbf37c
PM
7089 TCGv_i32 tcg_syn;
7090 uint32_t syndrome;
7091
7092 /* Note that since we are an implementation which takes an
7093 * exception on a trapped conditional instruction only if the
7094 * instruction passes its condition code check, we can take
7095 * advantage of the clause in the ARM ARM that allows us to set
7096 * the COND field in the instruction to 0xE in all cases.
7097 * We could fish the actual condition out of the insn (ARM)
7098 * or the condexec bits (Thumb) but it isn't necessary.
7099 */
7100 switch (cpnum) {
7101 case 14:
7102 if (is64) {
7103 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7104 isread, s->thumb);
7105 } else {
7106 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7107 rt, isread, s->thumb);
7108 }
7109 break;
7110 case 15:
7111 if (is64) {
7112 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7113 isread, s->thumb);
7114 } else {
7115 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7116 rt, isread, s->thumb);
7117 }
7118 break;
7119 default:
7120 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7121 * so this can only happen if this is an ARMv7 or earlier CPU,
7122 * in which case the syndrome information won't actually be
7123 * guest visible.
7124 */
7125 assert(!arm_feature(env, ARM_FEATURE_V8));
7126 syndrome = syn_uncategorized();
7127 break;
7128 }
7129
f59df3f2
PM
7130 gen_set_pc_im(s, s->pc);
7131 tmpptr = tcg_const_ptr(ri);
8bcbf37c
PM
7132 tcg_syn = tcg_const_i32(syndrome);
7133 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
f59df3f2 7134 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7135 tcg_temp_free_i32(tcg_syn);
f59df3f2
PM
7136 }
7137
4b6a83fb
PM
7138 /* Handle special cases first */
7139 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7140 case ARM_CP_NOP:
7141 return 0;
7142 case ARM_CP_WFI:
7143 if (isread) {
7144 return 1;
7145 }
eaed129d 7146 gen_set_pc_im(s, s->pc);
4b6a83fb 7147 s->is_jmp = DISAS_WFI;
2bee5105 7148 return 0;
4b6a83fb
PM
7149 default:
7150 break;
7151 }
7152
2452731c
PM
7153 if (use_icount && (ri->type & ARM_CP_IO)) {
7154 gen_io_start();
7155 }
7156
4b6a83fb
PM
7157 if (isread) {
7158 /* Read */
7159 if (is64) {
7160 TCGv_i64 tmp64;
7161 TCGv_i32 tmp;
7162 if (ri->type & ARM_CP_CONST) {
7163 tmp64 = tcg_const_i64(ri->resetvalue);
7164 } else if (ri->readfn) {
7165 TCGv_ptr tmpptr;
4b6a83fb
PM
7166 tmp64 = tcg_temp_new_i64();
7167 tmpptr = tcg_const_ptr(ri);
7168 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7169 tcg_temp_free_ptr(tmpptr);
7170 } else {
7171 tmp64 = tcg_temp_new_i64();
7172 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7173 }
7174 tmp = tcg_temp_new_i32();
7175 tcg_gen_trunc_i64_i32(tmp, tmp64);
7176 store_reg(s, rt, tmp);
7177 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7178 tmp = tcg_temp_new_i32();
4b6a83fb 7179 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 7180 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7181 store_reg(s, rt2, tmp);
7182 } else {
39d5492a 7183 TCGv_i32 tmp;
4b6a83fb
PM
7184 if (ri->type & ARM_CP_CONST) {
7185 tmp = tcg_const_i32(ri->resetvalue);
7186 } else if (ri->readfn) {
7187 TCGv_ptr tmpptr;
4b6a83fb
PM
7188 tmp = tcg_temp_new_i32();
7189 tmpptr = tcg_const_ptr(ri);
7190 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7191 tcg_temp_free_ptr(tmpptr);
7192 } else {
7193 tmp = load_cpu_offset(ri->fieldoffset);
7194 }
7195 if (rt == 15) {
7196 /* Destination register of r15 for 32 bit loads sets
7197 * the condition codes from the high 4 bits of the value
7198 */
7199 gen_set_nzcv(tmp);
7200 tcg_temp_free_i32(tmp);
7201 } else {
7202 store_reg(s, rt, tmp);
7203 }
7204 }
7205 } else {
7206 /* Write */
7207 if (ri->type & ARM_CP_CONST) {
7208 /* If not forbidden by access permissions, treat as WI */
7209 return 0;
7210 }
7211
7212 if (is64) {
39d5492a 7213 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7214 TCGv_i64 tmp64 = tcg_temp_new_i64();
7215 tmplo = load_reg(s, rt);
7216 tmphi = load_reg(s, rt2);
7217 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7218 tcg_temp_free_i32(tmplo);
7219 tcg_temp_free_i32(tmphi);
7220 if (ri->writefn) {
7221 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7222 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7223 tcg_temp_free_ptr(tmpptr);
7224 } else {
7225 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7226 }
7227 tcg_temp_free_i64(tmp64);
7228 } else {
7229 if (ri->writefn) {
39d5492a 7230 TCGv_i32 tmp;
4b6a83fb 7231 TCGv_ptr tmpptr;
4b6a83fb
PM
7232 tmp = load_reg(s, rt);
7233 tmpptr = tcg_const_ptr(ri);
7234 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7235 tcg_temp_free_ptr(tmpptr);
7236 tcg_temp_free_i32(tmp);
7237 } else {
39d5492a 7238 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7239 store_cpu_offset(tmp, ri->fieldoffset);
7240 }
7241 }
2452731c
PM
7242 }
7243
7244 if (use_icount && (ri->type & ARM_CP_IO)) {
7245 /* I/O operations must end the TB here (whether read or write) */
7246 gen_io_end();
7247 gen_lookup_tb(s);
7248 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7249 /* We default to ending the TB on a coprocessor register write,
7250 * but allow this to be suppressed by the register definition
7251 * (usually only necessary to work around guest bugs).
7252 */
2452731c 7253 gen_lookup_tb(s);
4b6a83fb 7254 }
2452731c 7255
4b6a83fb
PM
7256 return 0;
7257 }
7258
626187d8
PM
7259 /* Unknown register; this might be a guest error or a QEMU
7260 * unimplemented feature.
7261 */
7262 if (is64) {
7263 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7264 "64 bit system register cp:%d opc1: %d crm:%d\n",
7265 isread ? "read" : "write", cpnum, opc1, crm);
7266 } else {
7267 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
7268 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
7269 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
7270 }
7271
4a9a539f 7272 return 1;
9ee6e8bb
PB
7273}
7274
5e3f878a
PB
7275
7276/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7277static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7278{
39d5492a 7279 TCGv_i32 tmp;
7d1b0095 7280 tmp = tcg_temp_new_i32();
5e3f878a
PB
7281 tcg_gen_trunc_i64_i32(tmp, val);
7282 store_reg(s, rlow, tmp);
7d1b0095 7283 tmp = tcg_temp_new_i32();
5e3f878a
PB
7284 tcg_gen_shri_i64(val, val, 32);
7285 tcg_gen_trunc_i64_i32(tmp, val);
7286 store_reg(s, rhigh, tmp);
7287}
7288
7289/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7290static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7291{
a7812ae4 7292 TCGv_i64 tmp;
39d5492a 7293 TCGv_i32 tmp2;
5e3f878a 7294
36aa55dc 7295 /* Load value and extend to 64 bits. */
a7812ae4 7296 tmp = tcg_temp_new_i64();
5e3f878a
PB
7297 tmp2 = load_reg(s, rlow);
7298 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7299 tcg_temp_free_i32(tmp2);
5e3f878a 7300 tcg_gen_add_i64(val, val, tmp);
b75263d6 7301 tcg_temp_free_i64(tmp);
5e3f878a
PB
7302}
7303
7304/* load and add a 64-bit value from a register pair. */
a7812ae4 7305static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7306{
a7812ae4 7307 TCGv_i64 tmp;
39d5492a
PM
7308 TCGv_i32 tmpl;
7309 TCGv_i32 tmph;
5e3f878a
PB
7310
7311 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7312 tmpl = load_reg(s, rlow);
7313 tmph = load_reg(s, rhigh);
a7812ae4 7314 tmp = tcg_temp_new_i64();
36aa55dc 7315 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7316 tcg_temp_free_i32(tmpl);
7317 tcg_temp_free_i32(tmph);
5e3f878a 7318 tcg_gen_add_i64(val, val, tmp);
b75263d6 7319 tcg_temp_free_i64(tmp);
5e3f878a
PB
7320}
7321
c9f10124 7322/* Set N and Z flags from hi|lo. */
39d5492a 7323static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7324{
c9f10124
RH
7325 tcg_gen_mov_i32(cpu_NF, hi);
7326 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7327}
7328
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
/* Generate code for LDREX{B,H,D}/LDREX: load from @addr into rt (and
 * rt2 when size == 3), and record the loaded value and the address in
 * cpu_exclusive_val/cpu_exclusive_addr so a later store-exclusive can
 * check them.  @size is log2 of the access size in bytes; size == 3 is
 * the doubleword form done as two 32-bit loads.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Remember that this TB contains a load-exclusive. */
    s->is_ldex = true;

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        /* For size == 3 this is only the low word; the high word is
         * loaded separately below.
         */
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the high word from addr + 4 and record the full 64-bit
         * value in the exclusive monitor.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        /* Narrow loads: record the zero-extended value. */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7376
/* Generate code for CLREX: clear the local exclusive monitor. */
static void gen_clrex(DisasContext *s)
{
    /* Setting the recorded exclusive address to -1 guarantees no
     * subsequent store-exclusive can match it.
     */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7381
#ifdef CONFIG_USER_ONLY
/* Generate code for STREX{B,H,D}/STREX in user-mode emulation.
 * The compare-and-store cannot be done atomically inside the TB here,
 * so stash the address and the operand register numbers and raise
 * EXCP_STREX for the main loop to perform the operation.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers into exclusive_info. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
/* Generate code for STREX{B,H,D}/STREX in system emulation: succeed
 * (store and set rd = 0) only if @addr matches the recorded exclusive
 * address and memory still holds the recorded exclusive value;
 * otherwise set rd = 1.  Either way the exclusive monitor is cleared.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: the address must match the monitored address. */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load memory and compare against the value the
     * matching load-exclusive recorded.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        /* Doubleword: also re-load the high word from addr + 4. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s) and report success. */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is now clear. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7476
81465888
PM
7477/* gen_srs:
7478 * @env: CPUARMState
7479 * @s: DisasContext
7480 * @mode: mode field from insn (which stack to store to)
7481 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7482 * @writeback: true if writeback bit set
7483 *
7484 * Generate code for the SRS (Store Return State) insn.
7485 */
7486static void gen_srs(DisasContext *s,
7487 uint32_t mode, uint32_t amode, bool writeback)
7488{
7489 int32_t offset;
7490 TCGv_i32 addr = tcg_temp_new_i32();
7491 TCGv_i32 tmp = tcg_const_i32(mode);
7492 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7493 tcg_temp_free_i32(tmp);
7494 switch (amode) {
7495 case 0: /* DA */
7496 offset = -4;
7497 break;
7498 case 1: /* IA */
7499 offset = 0;
7500 break;
7501 case 2: /* DB */
7502 offset = -8;
7503 break;
7504 case 3: /* IB */
7505 offset = 4;
7506 break;
7507 default:
7508 abort();
7509 }
7510 tcg_gen_addi_i32(addr, addr, offset);
7511 tmp = load_reg(s, 14);
c1197795 7512 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7513 tcg_temp_free_i32(tmp);
81465888
PM
7514 tmp = load_cpu_field(spsr);
7515 tcg_gen_addi_i32(addr, addr, 4);
c1197795 7516 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7517 tcg_temp_free_i32(tmp);
81465888
PM
7518 if (writeback) {
7519 switch (amode) {
7520 case 0:
7521 offset = -8;
7522 break;
7523 case 1:
7524 offset = 4;
7525 break;
7526 case 2:
7527 offset = -4;
7528 break;
7529 case 3:
7530 offset = 0;
7531 break;
7532 default:
7533 abort();
7534 }
7535 tcg_gen_addi_i32(addr, addr, offset);
7536 tmp = tcg_const_i32(mode);
7537 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7538 tcg_temp_free_i32(tmp);
7539 }
7540 tcg_temp_free_i32(addr);
7541}
7542
0ecb72a5 7543static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
7544{
7545 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7546 TCGv_i32 tmp;
7547 TCGv_i32 tmp2;
7548 TCGv_i32 tmp3;
7549 TCGv_i32 addr;
a7812ae4 7550 TCGv_i64 tmp64;
9ee6e8bb 7551
d31dd73e 7552 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
7553 s->pc += 4;
7554
7555 /* M variants do not implement ARM mode. */
7556 if (IS_M(env))
7557 goto illegal_op;
7558 cond = insn >> 28;
7559 if (cond == 0xf){
be5e7a76
DES
7560 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7561 * choose to UNDEF. In ARMv5 and above the space is used
7562 * for miscellaneous unconditional instructions.
7563 */
7564 ARCH(5);
7565
9ee6e8bb
PB
7566 /* Unconditional instructions. */
7567 if (((insn >> 25) & 7) == 1) {
7568 /* NEON Data processing. */
7569 if (!arm_feature(env, ARM_FEATURE_NEON))
7570 goto illegal_op;
7571
7572 if (disas_neon_data_insn(env, s, insn))
7573 goto illegal_op;
7574 return;
7575 }
7576 if ((insn & 0x0f100000) == 0x04000000) {
7577 /* NEON load/store. */
7578 if (!arm_feature(env, ARM_FEATURE_NEON))
7579 goto illegal_op;
7580
7581 if (disas_neon_ls_insn(env, s, insn))
7582 goto illegal_op;
7583 return;
7584 }
6a57f3eb
WN
7585 if ((insn & 0x0f000e10) == 0x0e000a00) {
7586 /* VFP. */
7587 if (disas_vfp_insn(env, s, insn)) {
7588 goto illegal_op;
7589 }
7590 return;
7591 }
3d185e5d
PM
7592 if (((insn & 0x0f30f000) == 0x0510f000) ||
7593 ((insn & 0x0f30f010) == 0x0710f000)) {
7594 if ((insn & (1 << 22)) == 0) {
7595 /* PLDW; v7MP */
7596 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7597 goto illegal_op;
7598 }
7599 }
7600 /* Otherwise PLD; v5TE+ */
be5e7a76 7601 ARCH(5TE);
3d185e5d
PM
7602 return;
7603 }
7604 if (((insn & 0x0f70f000) == 0x0450f000) ||
7605 ((insn & 0x0f70f010) == 0x0650f000)) {
7606 ARCH(7);
7607 return; /* PLI; V7 */
7608 }
7609 if (((insn & 0x0f700000) == 0x04100000) ||
7610 ((insn & 0x0f700010) == 0x06100000)) {
7611 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
7612 goto illegal_op;
7613 }
7614 return; /* v7MP: Unallocated memory hint: must NOP */
7615 }
7616
7617 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7618 ARCH(6);
7619 /* setend */
10962fd5
PM
7620 if (((insn >> 9) & 1) != s->bswap_code) {
7621 /* Dynamic endianness switching not implemented. */
e0c270d9 7622 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7623 goto illegal_op;
7624 }
7625 return;
7626 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7627 switch ((insn >> 4) & 0xf) {
7628 case 1: /* clrex */
7629 ARCH(6K);
426f5abc 7630 gen_clrex(s);
9ee6e8bb
PB
7631 return;
7632 case 4: /* dsb */
7633 case 5: /* dmb */
7634 case 6: /* isb */
7635 ARCH(7);
7636 /* We don't emulate caches so these are a no-op. */
7637 return;
7638 default:
7639 goto illegal_op;
7640 }
7641 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7642 /* srs */
81465888 7643 if (IS_USER(s)) {
9ee6e8bb 7644 goto illegal_op;
9ee6e8bb 7645 }
81465888
PM
7646 ARCH(6);
7647 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7648 return;
ea825eee 7649 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7650 /* rfe */
c67b6b71 7651 int32_t offset;
9ee6e8bb
PB
7652 if (IS_USER(s))
7653 goto illegal_op;
7654 ARCH(6);
7655 rn = (insn >> 16) & 0xf;
b0109805 7656 addr = load_reg(s, rn);
9ee6e8bb
PB
7657 i = (insn >> 23) & 3;
7658 switch (i) {
b0109805 7659 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7660 case 1: offset = 0; break; /* IA */
7661 case 2: offset = -8; break; /* DB */
b0109805 7662 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7663 default: abort();
7664 }
7665 if (offset)
b0109805
PB
7666 tcg_gen_addi_i32(addr, addr, offset);
7667 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7668 tmp = tcg_temp_new_i32();
6ce2faf4 7669 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7670 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7671 tmp2 = tcg_temp_new_i32();
6ce2faf4 7672 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7673 if (insn & (1 << 21)) {
7674 /* Base writeback. */
7675 switch (i) {
b0109805 7676 case 0: offset = -8; break;
c67b6b71
FN
7677 case 1: offset = 4; break;
7678 case 2: offset = -4; break;
b0109805 7679 case 3: offset = 0; break;
9ee6e8bb
PB
7680 default: abort();
7681 }
7682 if (offset)
b0109805
PB
7683 tcg_gen_addi_i32(addr, addr, offset);
7684 store_reg(s, rn, addr);
7685 } else {
7d1b0095 7686 tcg_temp_free_i32(addr);
9ee6e8bb 7687 }
b0109805 7688 gen_rfe(s, tmp, tmp2);
c67b6b71 7689 return;
9ee6e8bb
PB
7690 } else if ((insn & 0x0e000000) == 0x0a000000) {
7691 /* branch link and change to thumb (blx <offset>) */
7692 int32_t offset;
7693
7694 val = (uint32_t)s->pc;
7d1b0095 7695 tmp = tcg_temp_new_i32();
d9ba4830
PB
7696 tcg_gen_movi_i32(tmp, val);
7697 store_reg(s, 14, tmp);
9ee6e8bb
PB
7698 /* Sign-extend the 24-bit offset */
7699 offset = (((int32_t)insn) << 8) >> 8;
7700 /* offset * 4 + bit24 * 2 + (thumb bit) */
7701 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7702 /* pipeline offset */
7703 val += 4;
be5e7a76 7704 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7705 gen_bx_im(s, val);
9ee6e8bb
PB
7706 return;
7707 } else if ((insn & 0x0e000f00) == 0x0c000100) {
7708 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
7709 /* iWMMXt register transfer. */
c0f4af17
PM
7710 if (extract32(s->c15_cpar, 1, 1)) {
7711 if (!disas_iwmmxt_insn(env, s, insn)) {
9ee6e8bb 7712 return;
c0f4af17
PM
7713 }
7714 }
9ee6e8bb
PB
7715 }
7716 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7717 /* Coprocessor double register transfer. */
be5e7a76 7718 ARCH(5TE);
9ee6e8bb
PB
7719 } else if ((insn & 0x0f000010) == 0x0e000010) {
7720 /* Additional coprocessor register transfer. */
7997d92f 7721 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7722 uint32_t mask;
7723 uint32_t val;
7724 /* cps (privileged) */
7725 if (IS_USER(s))
7726 return;
7727 mask = val = 0;
7728 if (insn & (1 << 19)) {
7729 if (insn & (1 << 8))
7730 mask |= CPSR_A;
7731 if (insn & (1 << 7))
7732 mask |= CPSR_I;
7733 if (insn & (1 << 6))
7734 mask |= CPSR_F;
7735 if (insn & (1 << 18))
7736 val |= mask;
7737 }
7997d92f 7738 if (insn & (1 << 17)) {
9ee6e8bb
PB
7739 mask |= CPSR_M;
7740 val |= (insn & 0x1f);
7741 }
7742 if (mask) {
2fbac54b 7743 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7744 }
7745 return;
7746 }
7747 goto illegal_op;
7748 }
7749 if (cond != 0xe) {
7750 /* if not always execute, we generate a conditional jump to
7751 next instruction */
7752 s->condlabel = gen_new_label();
39fb730a 7753 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7754 s->condjmp = 1;
7755 }
7756 if ((insn & 0x0f900000) == 0x03000000) {
7757 if ((insn & (1 << 21)) == 0) {
7758 ARCH(6T2);
7759 rd = (insn >> 12) & 0xf;
7760 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7761 if ((insn & (1 << 22)) == 0) {
7762 /* MOVW */
7d1b0095 7763 tmp = tcg_temp_new_i32();
5e3f878a 7764 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7765 } else {
7766 /* MOVT */
5e3f878a 7767 tmp = load_reg(s, rd);
86831435 7768 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7769 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7770 }
5e3f878a 7771 store_reg(s, rd, tmp);
9ee6e8bb
PB
7772 } else {
7773 if (((insn >> 12) & 0xf) != 0xf)
7774 goto illegal_op;
7775 if (((insn >> 16) & 0xf) == 0) {
7776 gen_nop_hint(s, insn & 0xff);
7777 } else {
7778 /* CPSR = immediate */
7779 val = insn & 0xff;
7780 shift = ((insn >> 8) & 0xf) * 2;
7781 if (shift)
7782 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7783 i = ((insn & (1 << 22)) != 0);
2fbac54b 7784 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
7785 goto illegal_op;
7786 }
7787 }
7788 } else if ((insn & 0x0f900000) == 0x01000000
7789 && (insn & 0x00000090) != 0x00000090) {
7790 /* miscellaneous instructions */
7791 op1 = (insn >> 21) & 3;
7792 sh = (insn >> 4) & 0xf;
7793 rm = insn & 0xf;
7794 switch (sh) {
7795 case 0x0: /* move program status register */
7796 if (op1 & 1) {
7797 /* PSR = reg */
2fbac54b 7798 tmp = load_reg(s, rm);
9ee6e8bb 7799 i = ((op1 & 2) != 0);
2fbac54b 7800 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7801 goto illegal_op;
7802 } else {
7803 /* reg = PSR */
7804 rd = (insn >> 12) & 0xf;
7805 if (op1 & 2) {
7806 if (IS_USER(s))
7807 goto illegal_op;
d9ba4830 7808 tmp = load_cpu_field(spsr);
9ee6e8bb 7809 } else {
7d1b0095 7810 tmp = tcg_temp_new_i32();
9ef39277 7811 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7812 }
d9ba4830 7813 store_reg(s, rd, tmp);
9ee6e8bb
PB
7814 }
7815 break;
7816 case 0x1:
7817 if (op1 == 1) {
7818 /* branch/exchange thumb (bx). */
be5e7a76 7819 ARCH(4T);
d9ba4830
PB
7820 tmp = load_reg(s, rm);
7821 gen_bx(s, tmp);
9ee6e8bb
PB
7822 } else if (op1 == 3) {
7823 /* clz */
be5e7a76 7824 ARCH(5);
9ee6e8bb 7825 rd = (insn >> 12) & 0xf;
1497c961
PB
7826 tmp = load_reg(s, rm);
7827 gen_helper_clz(tmp, tmp);
7828 store_reg(s, rd, tmp);
9ee6e8bb
PB
7829 } else {
7830 goto illegal_op;
7831 }
7832 break;
7833 case 0x2:
7834 if (op1 == 1) {
7835 ARCH(5J); /* bxj */
7836 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7837 tmp = load_reg(s, rm);
7838 gen_bx(s, tmp);
9ee6e8bb
PB
7839 } else {
7840 goto illegal_op;
7841 }
7842 break;
7843 case 0x3:
7844 if (op1 != 1)
7845 goto illegal_op;
7846
be5e7a76 7847 ARCH(5);
9ee6e8bb 7848 /* branch link/exchange thumb (blx) */
d9ba4830 7849 tmp = load_reg(s, rm);
7d1b0095 7850 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7851 tcg_gen_movi_i32(tmp2, s->pc);
7852 store_reg(s, 14, tmp2);
7853 gen_bx(s, tmp);
9ee6e8bb 7854 break;
eb0ecd5a
WN
7855 case 0x4:
7856 {
7857 /* crc32/crc32c */
7858 uint32_t c = extract32(insn, 8, 4);
7859
7860 /* Check this CPU supports ARMv8 CRC instructions.
7861 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7862 * Bits 8, 10 and 11 should be zero.
7863 */
7864 if (!arm_feature(env, ARM_FEATURE_CRC) || op1 == 0x3 ||
7865 (c & 0xd) != 0) {
7866 goto illegal_op;
7867 }
7868
7869 rn = extract32(insn, 16, 4);
7870 rd = extract32(insn, 12, 4);
7871
7872 tmp = load_reg(s, rn);
7873 tmp2 = load_reg(s, rm);
aa633469
PM
7874 if (op1 == 0) {
7875 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7876 } else if (op1 == 1) {
7877 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7878 }
eb0ecd5a
WN
7879 tmp3 = tcg_const_i32(1 << op1);
7880 if (c & 0x2) {
7881 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7882 } else {
7883 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7884 }
7885 tcg_temp_free_i32(tmp2);
7886 tcg_temp_free_i32(tmp3);
7887 store_reg(s, rd, tmp);
7888 break;
7889 }
9ee6e8bb 7890 case 0x5: /* saturating add/subtract */
be5e7a76 7891 ARCH(5TE);
9ee6e8bb
PB
7892 rd = (insn >> 12) & 0xf;
7893 rn = (insn >> 16) & 0xf;
b40d0353 7894 tmp = load_reg(s, rm);
5e3f878a 7895 tmp2 = load_reg(s, rn);
9ee6e8bb 7896 if (op1 & 2)
9ef39277 7897 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7898 if (op1 & 1)
9ef39277 7899 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7900 else
9ef39277 7901 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7902 tcg_temp_free_i32(tmp2);
5e3f878a 7903 store_reg(s, rd, tmp);
9ee6e8bb 7904 break;
49e14940 7905 case 7:
d4a2dc67
PM
7906 {
7907 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
7908 switch (op1) {
7909 case 1:
7910 /* bkpt */
7911 ARCH(5);
7912 gen_exception_insn(s, 4, EXCP_BKPT,
7913 syn_aa32_bkpt(imm16, false));
7914 break;
7915 case 2:
7916 /* Hypervisor call (v7) */
7917 ARCH(7);
7918 if (IS_USER(s)) {
7919 goto illegal_op;
7920 }
7921 gen_hvc(s, imm16);
7922 break;
7923 case 3:
7924 /* Secure monitor call (v6+) */
7925 ARCH(6K);
7926 if (IS_USER(s)) {
7927 goto illegal_op;
7928 }
7929 gen_smc(s);
7930 break;
7931 default:
49e14940
AL
7932 goto illegal_op;
7933 }
9ee6e8bb 7934 break;
d4a2dc67 7935 }
9ee6e8bb
PB
7936 case 0x8: /* signed multiply */
7937 case 0xa:
7938 case 0xc:
7939 case 0xe:
be5e7a76 7940 ARCH(5TE);
9ee6e8bb
PB
7941 rs = (insn >> 8) & 0xf;
7942 rn = (insn >> 12) & 0xf;
7943 rd = (insn >> 16) & 0xf;
7944 if (op1 == 1) {
7945 /* (32 * 16) >> 16 */
5e3f878a
PB
7946 tmp = load_reg(s, rm);
7947 tmp2 = load_reg(s, rs);
9ee6e8bb 7948 if (sh & 4)
5e3f878a 7949 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7950 else
5e3f878a 7951 gen_sxth(tmp2);
a7812ae4
PB
7952 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7953 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7954 tmp = tcg_temp_new_i32();
a7812ae4 7955 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7956 tcg_temp_free_i64(tmp64);
9ee6e8bb 7957 if ((sh & 2) == 0) {
5e3f878a 7958 tmp2 = load_reg(s, rn);
9ef39277 7959 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7960 tcg_temp_free_i32(tmp2);
9ee6e8bb 7961 }
5e3f878a 7962 store_reg(s, rd, tmp);
9ee6e8bb
PB
7963 } else {
7964 /* 16 * 16 */
5e3f878a
PB
7965 tmp = load_reg(s, rm);
7966 tmp2 = load_reg(s, rs);
7967 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7968 tcg_temp_free_i32(tmp2);
9ee6e8bb 7969 if (op1 == 2) {
a7812ae4
PB
7970 tmp64 = tcg_temp_new_i64();
7971 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7972 tcg_temp_free_i32(tmp);
a7812ae4
PB
7973 gen_addq(s, tmp64, rn, rd);
7974 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7975 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7976 } else {
7977 if (op1 == 0) {
5e3f878a 7978 tmp2 = load_reg(s, rn);
9ef39277 7979 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7980 tcg_temp_free_i32(tmp2);
9ee6e8bb 7981 }
5e3f878a 7982 store_reg(s, rd, tmp);
9ee6e8bb
PB
7983 }
7984 }
7985 break;
7986 default:
7987 goto illegal_op;
7988 }
7989 } else if (((insn & 0x0e000000) == 0 &&
7990 (insn & 0x00000090) != 0x90) ||
7991 ((insn & 0x0e000000) == (1 << 25))) {
7992 int set_cc, logic_cc, shiftop;
7993
7994 op1 = (insn >> 21) & 0xf;
7995 set_cc = (insn >> 20) & 1;
7996 logic_cc = table_logic_cc[op1] & set_cc;
7997
7998 /* data processing instruction */
7999 if (insn & (1 << 25)) {
8000 /* immediate operand */
8001 val = insn & 0xff;
8002 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8003 if (shift) {
9ee6e8bb 8004 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8005 }
7d1b0095 8006 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8007 tcg_gen_movi_i32(tmp2, val);
8008 if (logic_cc && shift) {
8009 gen_set_CF_bit31(tmp2);
8010 }
9ee6e8bb
PB
8011 } else {
8012 /* register */
8013 rm = (insn) & 0xf;
e9bb4aa9 8014 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8015 shiftop = (insn >> 5) & 3;
8016 if (!(insn & (1 << 4))) {
8017 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8018 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8019 } else {
8020 rs = (insn >> 8) & 0xf;
8984bd2e 8021 tmp = load_reg(s, rs);
e9bb4aa9 8022 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8023 }
8024 }
8025 if (op1 != 0x0f && op1 != 0x0d) {
8026 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8027 tmp = load_reg(s, rn);
8028 } else {
39d5492a 8029 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8030 }
8031 rd = (insn >> 12) & 0xf;
8032 switch(op1) {
8033 case 0x00:
e9bb4aa9
JR
8034 tcg_gen_and_i32(tmp, tmp, tmp2);
8035 if (logic_cc) {
8036 gen_logic_CC(tmp);
8037 }
21aeb343 8038 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8039 break;
8040 case 0x01:
e9bb4aa9
JR
8041 tcg_gen_xor_i32(tmp, tmp, tmp2);
8042 if (logic_cc) {
8043 gen_logic_CC(tmp);
8044 }
21aeb343 8045 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8046 break;
8047 case 0x02:
8048 if (set_cc && rd == 15) {
8049 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8050 if (IS_USER(s)) {
9ee6e8bb 8051 goto illegal_op;
e9bb4aa9 8052 }
72485ec4 8053 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8054 gen_exception_return(s, tmp);
9ee6e8bb 8055 } else {
e9bb4aa9 8056 if (set_cc) {
72485ec4 8057 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8058 } else {
8059 tcg_gen_sub_i32(tmp, tmp, tmp2);
8060 }
21aeb343 8061 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8062 }
8063 break;
8064 case 0x03:
e9bb4aa9 8065 if (set_cc) {
72485ec4 8066 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8067 } else {
8068 tcg_gen_sub_i32(tmp, tmp2, tmp);
8069 }
21aeb343 8070 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8071 break;
8072 case 0x04:
e9bb4aa9 8073 if (set_cc) {
72485ec4 8074 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8075 } else {
8076 tcg_gen_add_i32(tmp, tmp, tmp2);
8077 }
21aeb343 8078 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8079 break;
8080 case 0x05:
e9bb4aa9 8081 if (set_cc) {
49b4c31e 8082 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8083 } else {
8084 gen_add_carry(tmp, tmp, tmp2);
8085 }
21aeb343 8086 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8087 break;
8088 case 0x06:
e9bb4aa9 8089 if (set_cc) {
2de68a49 8090 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8091 } else {
8092 gen_sub_carry(tmp, tmp, tmp2);
8093 }
21aeb343 8094 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8095 break;
8096 case 0x07:
e9bb4aa9 8097 if (set_cc) {
2de68a49 8098 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8099 } else {
8100 gen_sub_carry(tmp, tmp2, tmp);
8101 }
21aeb343 8102 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8103 break;
8104 case 0x08:
8105 if (set_cc) {
e9bb4aa9
JR
8106 tcg_gen_and_i32(tmp, tmp, tmp2);
8107 gen_logic_CC(tmp);
9ee6e8bb 8108 }
7d1b0095 8109 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8110 break;
8111 case 0x09:
8112 if (set_cc) {
e9bb4aa9
JR
8113 tcg_gen_xor_i32(tmp, tmp, tmp2);
8114 gen_logic_CC(tmp);
9ee6e8bb 8115 }
7d1b0095 8116 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8117 break;
8118 case 0x0a:
8119 if (set_cc) {
72485ec4 8120 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8121 }
7d1b0095 8122 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8123 break;
8124 case 0x0b:
8125 if (set_cc) {
72485ec4 8126 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8127 }
7d1b0095 8128 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8129 break;
8130 case 0x0c:
e9bb4aa9
JR
8131 tcg_gen_or_i32(tmp, tmp, tmp2);
8132 if (logic_cc) {
8133 gen_logic_CC(tmp);
8134 }
21aeb343 8135 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8136 break;
8137 case 0x0d:
8138 if (logic_cc && rd == 15) {
8139 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8140 if (IS_USER(s)) {
9ee6e8bb 8141 goto illegal_op;
e9bb4aa9
JR
8142 }
8143 gen_exception_return(s, tmp2);
9ee6e8bb 8144 } else {
e9bb4aa9
JR
8145 if (logic_cc) {
8146 gen_logic_CC(tmp2);
8147 }
21aeb343 8148 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8149 }
8150 break;
8151 case 0x0e:
f669df27 8152 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8153 if (logic_cc) {
8154 gen_logic_CC(tmp);
8155 }
21aeb343 8156 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8157 break;
8158 default:
8159 case 0x0f:
e9bb4aa9
JR
8160 tcg_gen_not_i32(tmp2, tmp2);
8161 if (logic_cc) {
8162 gen_logic_CC(tmp2);
8163 }
21aeb343 8164 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
8165 break;
8166 }
e9bb4aa9 8167 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8168 tcg_temp_free_i32(tmp2);
e9bb4aa9 8169 }
9ee6e8bb
PB
8170 } else {
8171 /* other instructions */
8172 op1 = (insn >> 24) & 0xf;
8173 switch(op1) {
8174 case 0x0:
8175 case 0x1:
8176 /* multiplies, extra load/stores */
8177 sh = (insn >> 5) & 3;
8178 if (sh == 0) {
8179 if (op1 == 0x0) {
8180 rd = (insn >> 16) & 0xf;
8181 rn = (insn >> 12) & 0xf;
8182 rs = (insn >> 8) & 0xf;
8183 rm = (insn) & 0xf;
8184 op1 = (insn >> 20) & 0xf;
8185 switch (op1) {
8186 case 0: case 1: case 2: case 3: case 6:
8187 /* 32 bit mul */
5e3f878a
PB
8188 tmp = load_reg(s, rs);
8189 tmp2 = load_reg(s, rm);
8190 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8191 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8192 if (insn & (1 << 22)) {
8193 /* Subtract (mls) */
8194 ARCH(6T2);
5e3f878a
PB
8195 tmp2 = load_reg(s, rn);
8196 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8197 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8198 } else if (insn & (1 << 21)) {
8199 /* Add */
5e3f878a
PB
8200 tmp2 = load_reg(s, rn);
8201 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8202 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8203 }
8204 if (insn & (1 << 20))
5e3f878a
PB
8205 gen_logic_CC(tmp);
8206 store_reg(s, rd, tmp);
9ee6e8bb 8207 break;
8aac08b1
AJ
8208 case 4:
8209 /* 64 bit mul double accumulate (UMAAL) */
8210 ARCH(6);
8211 tmp = load_reg(s, rs);
8212 tmp2 = load_reg(s, rm);
8213 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8214 gen_addq_lo(s, tmp64, rn);
8215 gen_addq_lo(s, tmp64, rd);
8216 gen_storeq_reg(s, rn, rd, tmp64);
8217 tcg_temp_free_i64(tmp64);
8218 break;
8219 case 8: case 9: case 10: case 11:
8220 case 12: case 13: case 14: case 15:
8221 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8222 tmp = load_reg(s, rs);
8223 tmp2 = load_reg(s, rm);
8aac08b1 8224 if (insn & (1 << 22)) {
c9f10124 8225 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8226 } else {
c9f10124 8227 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8228 }
8229 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8230 TCGv_i32 al = load_reg(s, rn);
8231 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8232 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8233 tcg_temp_free_i32(al);
8234 tcg_temp_free_i32(ah);
9ee6e8bb 8235 }
8aac08b1 8236 if (insn & (1 << 20)) {
c9f10124 8237 gen_logicq_cc(tmp, tmp2);
8aac08b1 8238 }
c9f10124
RH
8239 store_reg(s, rn, tmp);
8240 store_reg(s, rd, tmp2);
9ee6e8bb 8241 break;
8aac08b1
AJ
8242 default:
8243 goto illegal_op;
9ee6e8bb
PB
8244 }
8245 } else {
8246 rn = (insn >> 16) & 0xf;
8247 rd = (insn >> 12) & 0xf;
8248 if (insn & (1 << 23)) {
8249 /* load/store exclusive */
2359bf80 8250 int op2 = (insn >> 8) & 3;
86753403 8251 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8252
8253 switch (op2) {
8254 case 0: /* lda/stl */
8255 if (op1 == 1) {
8256 goto illegal_op;
8257 }
8258 ARCH(8);
8259 break;
8260 case 1: /* reserved */
8261 goto illegal_op;
8262 case 2: /* ldaex/stlex */
8263 ARCH(8);
8264 break;
8265 case 3: /* ldrex/strex */
8266 if (op1) {
8267 ARCH(6K);
8268 } else {
8269 ARCH(6);
8270 }
8271 break;
8272 }
8273
3174f8e9 8274 addr = tcg_temp_local_new_i32();
98a46317 8275 load_reg_var(s, addr, rn);
2359bf80
MR
8276
8277 /* Since the emulation does not have barriers,
8278 the acquire/release semantics need no special
8279 handling */
8280 if (op2 == 0) {
8281 if (insn & (1 << 20)) {
8282 tmp = tcg_temp_new_i32();
8283 switch (op1) {
8284 case 0: /* lda */
6ce2faf4 8285 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8286 break;
8287 case 2: /* ldab */
6ce2faf4 8288 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8289 break;
8290 case 3: /* ldah */
6ce2faf4 8291 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8292 break;
8293 default:
8294 abort();
8295 }
8296 store_reg(s, rd, tmp);
8297 } else {
8298 rm = insn & 0xf;
8299 tmp = load_reg(s, rm);
8300 switch (op1) {
8301 case 0: /* stl */
6ce2faf4 8302 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8303 break;
8304 case 2: /* stlb */
6ce2faf4 8305 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8306 break;
8307 case 3: /* stlh */
6ce2faf4 8308 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8309 break;
8310 default:
8311 abort();
8312 }
8313 tcg_temp_free_i32(tmp);
8314 }
8315 } else if (insn & (1 << 20)) {
86753403
PB
8316 switch (op1) {
8317 case 0: /* ldrex */
426f5abc 8318 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8319 break;
8320 case 1: /* ldrexd */
426f5abc 8321 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8322 break;
8323 case 2: /* ldrexb */
426f5abc 8324 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8325 break;
8326 case 3: /* ldrexh */
426f5abc 8327 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8328 break;
8329 default:
8330 abort();
8331 }
9ee6e8bb
PB
8332 } else {
8333 rm = insn & 0xf;
86753403
PB
8334 switch (op1) {
8335 case 0: /* strex */
426f5abc 8336 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8337 break;
8338 case 1: /* strexd */
502e64fe 8339 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8340 break;
8341 case 2: /* strexb */
426f5abc 8342 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8343 break;
8344 case 3: /* strexh */
426f5abc 8345 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8346 break;
8347 default:
8348 abort();
8349 }
9ee6e8bb 8350 }
39d5492a 8351 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8352 } else {
8353 /* SWP instruction */
8354 rm = (insn) & 0xf;
8355
8984bd2e
PB
8356 /* ??? This is not really atomic. However we know
8357 we never have multiple CPUs running in parallel,
8358 so it is good enough. */
8359 addr = load_reg(s, rn);
8360 tmp = load_reg(s, rm);
5a839c0d 8361 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8362 if (insn & (1 << 22)) {
6ce2faf4
EI
8363 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8364 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8365 } else {
6ce2faf4
EI
8366 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8367 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8368 }
5a839c0d 8369 tcg_temp_free_i32(tmp);
7d1b0095 8370 tcg_temp_free_i32(addr);
8984bd2e 8371 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8372 }
8373 }
8374 } else {
8375 int address_offset;
8376 int load;
8377 /* Misc load/store */
8378 rn = (insn >> 16) & 0xf;
8379 rd = (insn >> 12) & 0xf;
b0109805 8380 addr = load_reg(s, rn);
9ee6e8bb 8381 if (insn & (1 << 24))
b0109805 8382 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8383 address_offset = 0;
8384 if (insn & (1 << 20)) {
8385 /* load */
5a839c0d 8386 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8387 switch(sh) {
8388 case 1:
6ce2faf4 8389 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8390 break;
8391 case 2:
6ce2faf4 8392 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8393 break;
8394 default:
8395 case 3:
6ce2faf4 8396 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8397 break;
8398 }
8399 load = 1;
8400 } else if (sh & 2) {
be5e7a76 8401 ARCH(5TE);
9ee6e8bb
PB
8402 /* doubleword */
8403 if (sh & 1) {
8404 /* store */
b0109805 8405 tmp = load_reg(s, rd);
6ce2faf4 8406 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8407 tcg_temp_free_i32(tmp);
b0109805
PB
8408 tcg_gen_addi_i32(addr, addr, 4);
8409 tmp = load_reg(s, rd + 1);
6ce2faf4 8410 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8411 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8412 load = 0;
8413 } else {
8414 /* load */
5a839c0d 8415 tmp = tcg_temp_new_i32();
6ce2faf4 8416 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8417 store_reg(s, rd, tmp);
8418 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8419 tmp = tcg_temp_new_i32();
6ce2faf4 8420 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8421 rd++;
8422 load = 1;
8423 }
8424 address_offset = -4;
8425 } else {
8426 /* store */
b0109805 8427 tmp = load_reg(s, rd);
6ce2faf4 8428 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8429 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8430 load = 0;
8431 }
8432 /* Perform base writeback before the loaded value to
8433 ensure correct behavior with overlapping index registers.
8434 ldrd with base writeback is is undefined if the
8435 destination and index registers overlap. */
8436 if (!(insn & (1 << 24))) {
b0109805
PB
8437 gen_add_datah_offset(s, insn, address_offset, addr);
8438 store_reg(s, rn, addr);
9ee6e8bb
PB
8439 } else if (insn & (1 << 21)) {
8440 if (address_offset)
b0109805
PB
8441 tcg_gen_addi_i32(addr, addr, address_offset);
8442 store_reg(s, rn, addr);
8443 } else {
7d1b0095 8444 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8445 }
8446 if (load) {
8447 /* Complete the load. */
b0109805 8448 store_reg(s, rd, tmp);
9ee6e8bb
PB
8449 }
8450 }
8451 break;
8452 case 0x4:
8453 case 0x5:
8454 goto do_ldst;
8455 case 0x6:
8456 case 0x7:
8457 if (insn & (1 << 4)) {
8458 ARCH(6);
8459 /* Armv6 Media instructions. */
8460 rm = insn & 0xf;
8461 rn = (insn >> 16) & 0xf;
2c0262af 8462 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8463 rs = (insn >> 8) & 0xf;
8464 switch ((insn >> 23) & 3) {
8465 case 0: /* Parallel add/subtract. */
8466 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8467 tmp = load_reg(s, rn);
8468 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8469 sh = (insn >> 5) & 7;
8470 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8471 goto illegal_op;
6ddbc6e4 8472 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8473 tcg_temp_free_i32(tmp2);
6ddbc6e4 8474 store_reg(s, rd, tmp);
9ee6e8bb
PB
8475 break;
8476 case 1:
8477 if ((insn & 0x00700020) == 0) {
6c95676b 8478 /* Halfword pack. */
3670669c
PB
8479 tmp = load_reg(s, rn);
8480 tmp2 = load_reg(s, rm);
9ee6e8bb 8481 shift = (insn >> 7) & 0x1f;
3670669c
PB
8482 if (insn & (1 << 6)) {
8483 /* pkhtb */
22478e79
AZ
8484 if (shift == 0)
8485 shift = 31;
8486 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8487 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8488 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8489 } else {
8490 /* pkhbt */
22478e79
AZ
8491 if (shift)
8492 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8493 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8494 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8495 }
8496 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8497 tcg_temp_free_i32(tmp2);
3670669c 8498 store_reg(s, rd, tmp);
9ee6e8bb
PB
8499 } else if ((insn & 0x00200020) == 0x00200000) {
8500 /* [us]sat */
6ddbc6e4 8501 tmp = load_reg(s, rm);
9ee6e8bb
PB
8502 shift = (insn >> 7) & 0x1f;
8503 if (insn & (1 << 6)) {
8504 if (shift == 0)
8505 shift = 31;
6ddbc6e4 8506 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8507 } else {
6ddbc6e4 8508 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8509 }
8510 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8511 tmp2 = tcg_const_i32(sh);
8512 if (insn & (1 << 22))
9ef39277 8513 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8514 else
9ef39277 8515 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8516 tcg_temp_free_i32(tmp2);
6ddbc6e4 8517 store_reg(s, rd, tmp);
9ee6e8bb
PB
8518 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8519 /* [us]sat16 */
6ddbc6e4 8520 tmp = load_reg(s, rm);
9ee6e8bb 8521 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8522 tmp2 = tcg_const_i32(sh);
8523 if (insn & (1 << 22))
9ef39277 8524 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8525 else
9ef39277 8526 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8527 tcg_temp_free_i32(tmp2);
6ddbc6e4 8528 store_reg(s, rd, tmp);
9ee6e8bb
PB
8529 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8530 /* Select bytes. */
6ddbc6e4
PB
8531 tmp = load_reg(s, rn);
8532 tmp2 = load_reg(s, rm);
7d1b0095 8533 tmp3 = tcg_temp_new_i32();
0ecb72a5 8534 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8535 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8536 tcg_temp_free_i32(tmp3);
8537 tcg_temp_free_i32(tmp2);
6ddbc6e4 8538 store_reg(s, rd, tmp);
9ee6e8bb 8539 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8540 tmp = load_reg(s, rm);
9ee6e8bb 8541 shift = (insn >> 10) & 3;
1301f322 8542 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8543 rotate, a shift is sufficient. */
8544 if (shift != 0)
f669df27 8545 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8546 op1 = (insn >> 20) & 7;
8547 switch (op1) {
5e3f878a
PB
8548 case 0: gen_sxtb16(tmp); break;
8549 case 2: gen_sxtb(tmp); break;
8550 case 3: gen_sxth(tmp); break;
8551 case 4: gen_uxtb16(tmp); break;
8552 case 6: gen_uxtb(tmp); break;
8553 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8554 default: goto illegal_op;
8555 }
8556 if (rn != 15) {
5e3f878a 8557 tmp2 = load_reg(s, rn);
9ee6e8bb 8558 if ((op1 & 3) == 0) {
5e3f878a 8559 gen_add16(tmp, tmp2);
9ee6e8bb 8560 } else {
5e3f878a 8561 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8562 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8563 }
8564 }
6c95676b 8565 store_reg(s, rd, tmp);
9ee6e8bb
PB
8566 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8567 /* rev */
b0109805 8568 tmp = load_reg(s, rm);
9ee6e8bb
PB
8569 if (insn & (1 << 22)) {
8570 if (insn & (1 << 7)) {
b0109805 8571 gen_revsh(tmp);
9ee6e8bb
PB
8572 } else {
8573 ARCH(6T2);
b0109805 8574 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8575 }
8576 } else {
8577 if (insn & (1 << 7))
b0109805 8578 gen_rev16(tmp);
9ee6e8bb 8579 else
66896cb8 8580 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8581 }
b0109805 8582 store_reg(s, rd, tmp);
9ee6e8bb
PB
8583 } else {
8584 goto illegal_op;
8585 }
8586 break;
8587 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8588 switch ((insn >> 20) & 0x7) {
8589 case 5:
8590 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8591 /* op2 not 00x or 11x : UNDEF */
8592 goto illegal_op;
8593 }
838fa72d
AJ
8594 /* Signed multiply most significant [accumulate].
8595 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8596 tmp = load_reg(s, rm);
8597 tmp2 = load_reg(s, rs);
a7812ae4 8598 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8599
955a7dd5 8600 if (rd != 15) {
838fa72d 8601 tmp = load_reg(s, rd);
9ee6e8bb 8602 if (insn & (1 << 6)) {
838fa72d 8603 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8604 } else {
838fa72d 8605 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8606 }
8607 }
838fa72d
AJ
8608 if (insn & (1 << 5)) {
8609 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8610 }
8611 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8612 tmp = tcg_temp_new_i32();
838fa72d
AJ
8613 tcg_gen_trunc_i64_i32(tmp, tmp64);
8614 tcg_temp_free_i64(tmp64);
955a7dd5 8615 store_reg(s, rn, tmp);
41e9564d
PM
8616 break;
8617 case 0:
8618 case 4:
8619 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8620 if (insn & (1 << 7)) {
8621 goto illegal_op;
8622 }
8623 tmp = load_reg(s, rm);
8624 tmp2 = load_reg(s, rs);
9ee6e8bb 8625 if (insn & (1 << 5))
5e3f878a
PB
8626 gen_swap_half(tmp2);
8627 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8628 if (insn & (1 << 22)) {
5e3f878a 8629 /* smlald, smlsld */
33bbd75a
PC
8630 TCGv_i64 tmp64_2;
8631
a7812ae4 8632 tmp64 = tcg_temp_new_i64();
33bbd75a 8633 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8634 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8635 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8636 tcg_temp_free_i32(tmp);
33bbd75a
PC
8637 tcg_temp_free_i32(tmp2);
8638 if (insn & (1 << 6)) {
8639 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8640 } else {
8641 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8642 }
8643 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8644 gen_addq(s, tmp64, rd, rn);
8645 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8646 tcg_temp_free_i64(tmp64);
9ee6e8bb 8647 } else {
5e3f878a 8648 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8649 if (insn & (1 << 6)) {
8650 /* This subtraction cannot overflow. */
8651 tcg_gen_sub_i32(tmp, tmp, tmp2);
8652 } else {
8653 /* This addition cannot overflow 32 bits;
8654 * however it may overflow considered as a
8655 * signed operation, in which case we must set
8656 * the Q flag.
8657 */
8658 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8659 }
8660 tcg_temp_free_i32(tmp2);
22478e79 8661 if (rd != 15)
9ee6e8bb 8662 {
22478e79 8663 tmp2 = load_reg(s, rd);
9ef39277 8664 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8665 tcg_temp_free_i32(tmp2);
9ee6e8bb 8666 }
22478e79 8667 store_reg(s, rn, tmp);
9ee6e8bb 8668 }
41e9564d 8669 break;
b8b8ea05
PM
8670 case 1:
8671 case 3:
8672 /* SDIV, UDIV */
8673 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
8674 goto illegal_op;
8675 }
8676 if (((insn >> 5) & 7) || (rd != 15)) {
8677 goto illegal_op;
8678 }
8679 tmp = load_reg(s, rm);
8680 tmp2 = load_reg(s, rs);
8681 if (insn & (1 << 21)) {
8682 gen_helper_udiv(tmp, tmp, tmp2);
8683 } else {
8684 gen_helper_sdiv(tmp, tmp, tmp2);
8685 }
8686 tcg_temp_free_i32(tmp2);
8687 store_reg(s, rn, tmp);
8688 break;
41e9564d
PM
8689 default:
8690 goto illegal_op;
9ee6e8bb
PB
8691 }
8692 break;
8693 case 3:
8694 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8695 switch (op1) {
8696 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8697 ARCH(6);
8698 tmp = load_reg(s, rm);
8699 tmp2 = load_reg(s, rs);
8700 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8701 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8702 if (rd != 15) {
8703 tmp2 = load_reg(s, rd);
6ddbc6e4 8704 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8705 tcg_temp_free_i32(tmp2);
9ee6e8bb 8706 }
ded9d295 8707 store_reg(s, rn, tmp);
9ee6e8bb
PB
8708 break;
8709 case 0x20: case 0x24: case 0x28: case 0x2c:
8710 /* Bitfield insert/clear. */
8711 ARCH(6T2);
8712 shift = (insn >> 7) & 0x1f;
8713 i = (insn >> 16) & 0x1f;
8714 i = i + 1 - shift;
8715 if (rm == 15) {
7d1b0095 8716 tmp = tcg_temp_new_i32();
5e3f878a 8717 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8718 } else {
5e3f878a 8719 tmp = load_reg(s, rm);
9ee6e8bb
PB
8720 }
8721 if (i != 32) {
5e3f878a 8722 tmp2 = load_reg(s, rd);
d593c48e 8723 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8724 tcg_temp_free_i32(tmp2);
9ee6e8bb 8725 }
5e3f878a 8726 store_reg(s, rd, tmp);
9ee6e8bb
PB
8727 break;
8728 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8729 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8730 ARCH(6T2);
5e3f878a 8731 tmp = load_reg(s, rm);
9ee6e8bb
PB
8732 shift = (insn >> 7) & 0x1f;
8733 i = ((insn >> 16) & 0x1f) + 1;
8734 if (shift + i > 32)
8735 goto illegal_op;
8736 if (i < 32) {
8737 if (op1 & 0x20) {
5e3f878a 8738 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8739 } else {
5e3f878a 8740 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8741 }
8742 }
5e3f878a 8743 store_reg(s, rd, tmp);
9ee6e8bb
PB
8744 break;
8745 default:
8746 goto illegal_op;
8747 }
8748 break;
8749 }
8750 break;
8751 }
8752 do_ldst:
8753 /* Check for undefined extension instructions
8754 * per the ARM Bible IE:
8755 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8756 */
8757 sh = (0xf << 20) | (0xf << 4);
8758 if (op1 == 0x7 && ((insn & sh) == sh))
8759 {
8760 goto illegal_op;
8761 }
8762 /* load/store byte/word */
8763 rn = (insn >> 16) & 0xf;
8764 rd = (insn >> 12) & 0xf;
b0109805 8765 tmp2 = load_reg(s, rn);
a99caa48
PM
8766 if ((insn & 0x01200000) == 0x00200000) {
8767 /* ldrt/strt */
8768 i = MMU_USER_IDX;
8769 } else {
8770 i = get_mem_index(s);
8771 }
9ee6e8bb 8772 if (insn & (1 << 24))
b0109805 8773 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8774 if (insn & (1 << 20)) {
8775 /* load */
5a839c0d 8776 tmp = tcg_temp_new_i32();
9ee6e8bb 8777 if (insn & (1 << 22)) {
08307563 8778 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8779 } else {
08307563 8780 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8781 }
9ee6e8bb
PB
8782 } else {
8783 /* store */
b0109805 8784 tmp = load_reg(s, rd);
5a839c0d 8785 if (insn & (1 << 22)) {
08307563 8786 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8787 } else {
08307563 8788 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8789 }
8790 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8791 }
8792 if (!(insn & (1 << 24))) {
b0109805
PB
8793 gen_add_data_offset(s, insn, tmp2);
8794 store_reg(s, rn, tmp2);
8795 } else if (insn & (1 << 21)) {
8796 store_reg(s, rn, tmp2);
8797 } else {
7d1b0095 8798 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8799 }
8800 if (insn & (1 << 20)) {
8801 /* Complete the load. */
be5e7a76 8802 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
8803 }
8804 break;
8805 case 0x08:
8806 case 0x09:
8807 {
8808 int j, n, user, loaded_base;
39d5492a 8809 TCGv_i32 loaded_var;
9ee6e8bb
PB
8810 /* load/store multiple words */
8811 /* XXX: store correct base if write back */
8812 user = 0;
8813 if (insn & (1 << 22)) {
8814 if (IS_USER(s))
8815 goto illegal_op; /* only usable in supervisor mode */
8816
8817 if ((insn & (1 << 15)) == 0)
8818 user = 1;
8819 }
8820 rn = (insn >> 16) & 0xf;
b0109805 8821 addr = load_reg(s, rn);
9ee6e8bb
PB
8822
8823 /* compute total size */
8824 loaded_base = 0;
39d5492a 8825 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8826 n = 0;
8827 for(i=0;i<16;i++) {
8828 if (insn & (1 << i))
8829 n++;
8830 }
8831 /* XXX: test invalid n == 0 case ? */
8832 if (insn & (1 << 23)) {
8833 if (insn & (1 << 24)) {
8834 /* pre increment */
b0109805 8835 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8836 } else {
8837 /* post increment */
8838 }
8839 } else {
8840 if (insn & (1 << 24)) {
8841 /* pre decrement */
b0109805 8842 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8843 } else {
8844 /* post decrement */
8845 if (n != 1)
b0109805 8846 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8847 }
8848 }
8849 j = 0;
8850 for(i=0;i<16;i++) {
8851 if (insn & (1 << i)) {
8852 if (insn & (1 << 20)) {
8853 /* load */
5a839c0d 8854 tmp = tcg_temp_new_i32();
6ce2faf4 8855 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8856 if (user) {
b75263d6 8857 tmp2 = tcg_const_i32(i);
1ce94f81 8858 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8859 tcg_temp_free_i32(tmp2);
7d1b0095 8860 tcg_temp_free_i32(tmp);
9ee6e8bb 8861 } else if (i == rn) {
b0109805 8862 loaded_var = tmp;
9ee6e8bb
PB
8863 loaded_base = 1;
8864 } else {
be5e7a76 8865 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
8866 }
8867 } else {
8868 /* store */
8869 if (i == 15) {
8870 /* special case: r15 = PC + 8 */
8871 val = (long)s->pc + 4;
7d1b0095 8872 tmp = tcg_temp_new_i32();
b0109805 8873 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8874 } else if (user) {
7d1b0095 8875 tmp = tcg_temp_new_i32();
b75263d6 8876 tmp2 = tcg_const_i32(i);
9ef39277 8877 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8878 tcg_temp_free_i32(tmp2);
9ee6e8bb 8879 } else {
b0109805 8880 tmp = load_reg(s, i);
9ee6e8bb 8881 }
6ce2faf4 8882 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8883 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8884 }
8885 j++;
8886 /* no need to add after the last transfer */
8887 if (j != n)
b0109805 8888 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8889 }
8890 }
8891 if (insn & (1 << 21)) {
8892 /* write back */
8893 if (insn & (1 << 23)) {
8894 if (insn & (1 << 24)) {
8895 /* pre increment */
8896 } else {
8897 /* post increment */
b0109805 8898 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8899 }
8900 } else {
8901 if (insn & (1 << 24)) {
8902 /* pre decrement */
8903 if (n != 1)
b0109805 8904 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8905 } else {
8906 /* post decrement */
b0109805 8907 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8908 }
8909 }
b0109805
PB
8910 store_reg(s, rn, addr);
8911 } else {
7d1b0095 8912 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8913 }
8914 if (loaded_base) {
b0109805 8915 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8916 }
8917 if ((insn & (1 << 22)) && !user) {
8918 /* Restore CPSR from SPSR. */
d9ba4830 8919 tmp = load_cpu_field(spsr);
4051e12c 8920 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 8921 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8922 s->is_jmp = DISAS_UPDATE;
8923 }
8924 }
8925 break;
8926 case 0xa:
8927 case 0xb:
8928 {
8929 int32_t offset;
8930
8931 /* branch (and link) */
8932 val = (int32_t)s->pc;
8933 if (insn & (1 << 24)) {
7d1b0095 8934 tmp = tcg_temp_new_i32();
5e3f878a
PB
8935 tcg_gen_movi_i32(tmp, val);
8936 store_reg(s, 14, tmp);
9ee6e8bb 8937 }
534df156
PM
8938 offset = sextract32(insn << 2, 0, 26);
8939 val += offset + 4;
9ee6e8bb
PB
8940 gen_jmp(s, val);
8941 }
8942 break;
8943 case 0xc:
8944 case 0xd:
8945 case 0xe:
6a57f3eb
WN
8946 if (((insn >> 8) & 0xe) == 10) {
8947 /* VFP. */
8948 if (disas_vfp_insn(env, s, insn)) {
8949 goto illegal_op;
8950 }
8951 } else if (disas_coproc_insn(env, s, insn)) {
8952 /* Coprocessor. */
9ee6e8bb 8953 goto illegal_op;
6a57f3eb 8954 }
9ee6e8bb
PB
8955 break;
8956 case 0xf:
8957 /* swi */
eaed129d 8958 gen_set_pc_im(s, s->pc);
d4a2dc67 8959 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
8960 s->is_jmp = DISAS_SWI;
8961 break;
8962 default:
8963 illegal_op:
d4a2dc67 8964 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
8965 break;
8966 }
8967 }
8968}
8969
/* Return true (nonzero) if this is a Thumb-2 logical operation.
 * In the Thumb-2 data-processing encoding, opcodes 0..7 form the
 * logical group (and, bic, orr, orn, eor, ...); arithmetic opcodes
 * start at 8.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
8976
8977/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8978 then set condition code flags based on the result of the operation.
8979 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8980 to the high bit of T1.
8981 Returns zero if the opcode is valid. */
8982
8983static int
39d5492a
PM
8984gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8985 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8986{
8987 int logic_cc;
8988
8989 logic_cc = 0;
8990 switch (op) {
8991 case 0: /* and */
396e467c 8992 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8993 logic_cc = conds;
8994 break;
8995 case 1: /* bic */
f669df27 8996 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8997 logic_cc = conds;
8998 break;
8999 case 2: /* orr */
396e467c 9000 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9001 logic_cc = conds;
9002 break;
9003 case 3: /* orn */
29501f1b 9004 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9005 logic_cc = conds;
9006 break;
9007 case 4: /* eor */
396e467c 9008 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9009 logic_cc = conds;
9010 break;
9011 case 8: /* add */
9012 if (conds)
72485ec4 9013 gen_add_CC(t0, t0, t1);
9ee6e8bb 9014 else
396e467c 9015 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9016 break;
9017 case 10: /* adc */
9018 if (conds)
49b4c31e 9019 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9020 else
396e467c 9021 gen_adc(t0, t1);
9ee6e8bb
PB
9022 break;
9023 case 11: /* sbc */
2de68a49
RH
9024 if (conds) {
9025 gen_sbc_CC(t0, t0, t1);
9026 } else {
396e467c 9027 gen_sub_carry(t0, t0, t1);
2de68a49 9028 }
9ee6e8bb
PB
9029 break;
9030 case 13: /* sub */
9031 if (conds)
72485ec4 9032 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9033 else
396e467c 9034 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9035 break;
9036 case 14: /* rsb */
9037 if (conds)
72485ec4 9038 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9039 else
396e467c 9040 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9041 break;
9042 default: /* 5, 6, 7, 9, 12, 15. */
9043 return 1;
9044 }
9045 if (logic_cc) {
396e467c 9046 gen_logic_CC(t0);
9ee6e8bb 9047 if (shifter_out)
396e467c 9048 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9049 }
9050 return 0;
9051}
9052
9053/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9054 is not legal. */
0ecb72a5 9055static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9056{
b0109805 9057 uint32_t insn, imm, shift, offset;
9ee6e8bb 9058 uint32_t rd, rn, rm, rs;
39d5492a
PM
9059 TCGv_i32 tmp;
9060 TCGv_i32 tmp2;
9061 TCGv_i32 tmp3;
9062 TCGv_i32 addr;
a7812ae4 9063 TCGv_i64 tmp64;
9ee6e8bb
PB
9064 int op;
9065 int shiftop;
9066 int conds;
9067 int logic_cc;
9068
9069 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
9070 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 9071 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9072 16-bit instructions to get correct prefetch abort behavior. */
9073 insn = insn_hw1;
9074 if ((insn & (1 << 12)) == 0) {
be5e7a76 9075 ARCH(5);
9ee6e8bb
PB
9076 /* Second half of blx. */
9077 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9078 tmp = load_reg(s, 14);
9079 tcg_gen_addi_i32(tmp, tmp, offset);
9080 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9081
7d1b0095 9082 tmp2 = tcg_temp_new_i32();
b0109805 9083 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9084 store_reg(s, 14, tmp2);
9085 gen_bx(s, tmp);
9ee6e8bb
PB
9086 return 0;
9087 }
9088 if (insn & (1 << 11)) {
9089 /* Second half of bl. */
9090 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9091 tmp = load_reg(s, 14);
6a0d8a1d 9092 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9093
7d1b0095 9094 tmp2 = tcg_temp_new_i32();
b0109805 9095 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9096 store_reg(s, 14, tmp2);
9097 gen_bx(s, tmp);
9ee6e8bb
PB
9098 return 0;
9099 }
9100 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9101 /* Instruction spans a page boundary. Implement it as two
9102 16-bit instructions in case the second half causes an
9103 prefetch abort. */
9104 offset = ((int32_t)insn << 21) >> 9;
396e467c 9105 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9106 return 0;
9107 }
9108 /* Fall through to 32-bit decode. */
9109 }
9110
d31dd73e 9111 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9112 s->pc += 2;
9113 insn |= (uint32_t)insn_hw1 << 16;
9114
9115 if ((insn & 0xf800e800) != 0xf000e800) {
9116 ARCH(6T2);
9117 }
9118
9119 rn = (insn >> 16) & 0xf;
9120 rs = (insn >> 12) & 0xf;
9121 rd = (insn >> 8) & 0xf;
9122 rm = insn & 0xf;
9123 switch ((insn >> 25) & 0xf) {
9124 case 0: case 1: case 2: case 3:
9125 /* 16-bit instructions. Should never happen. */
9126 abort();
9127 case 4:
9128 if (insn & (1 << 22)) {
9129 /* Other load/store, table branch. */
9130 if (insn & 0x01200000) {
9131 /* Load/store doubleword. */
9132 if (rn == 15) {
7d1b0095 9133 addr = tcg_temp_new_i32();
b0109805 9134 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9135 } else {
b0109805 9136 addr = load_reg(s, rn);
9ee6e8bb
PB
9137 }
9138 offset = (insn & 0xff) * 4;
9139 if ((insn & (1 << 23)) == 0)
9140 offset = -offset;
9141 if (insn & (1 << 24)) {
b0109805 9142 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9143 offset = 0;
9144 }
9145 if (insn & (1 << 20)) {
9146 /* ldrd */
e2592fad 9147 tmp = tcg_temp_new_i32();
6ce2faf4 9148 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9149 store_reg(s, rs, tmp);
9150 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9151 tmp = tcg_temp_new_i32();
6ce2faf4 9152 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9153 store_reg(s, rd, tmp);
9ee6e8bb
PB
9154 } else {
9155 /* strd */
b0109805 9156 tmp = load_reg(s, rs);
6ce2faf4 9157 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9158 tcg_temp_free_i32(tmp);
b0109805
PB
9159 tcg_gen_addi_i32(addr, addr, 4);
9160 tmp = load_reg(s, rd);
6ce2faf4 9161 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9162 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9163 }
9164 if (insn & (1 << 21)) {
9165 /* Base writeback. */
9166 if (rn == 15)
9167 goto illegal_op;
b0109805
PB
9168 tcg_gen_addi_i32(addr, addr, offset - 4);
9169 store_reg(s, rn, addr);
9170 } else {
7d1b0095 9171 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9172 }
9173 } else if ((insn & (1 << 23)) == 0) {
9174 /* Load/store exclusive word. */
39d5492a 9175 addr = tcg_temp_local_new_i32();
98a46317 9176 load_reg_var(s, addr, rn);
426f5abc 9177 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9178 if (insn & (1 << 20)) {
426f5abc 9179 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9180 } else {
426f5abc 9181 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9182 }
39d5492a 9183 tcg_temp_free_i32(addr);
2359bf80 9184 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9185 /* Table Branch. */
9186 if (rn == 15) {
7d1b0095 9187 addr = tcg_temp_new_i32();
b0109805 9188 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9189 } else {
b0109805 9190 addr = load_reg(s, rn);
9ee6e8bb 9191 }
b26eefb6 9192 tmp = load_reg(s, rm);
b0109805 9193 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9194 if (insn & (1 << 4)) {
9195 /* tbh */
b0109805 9196 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9197 tcg_temp_free_i32(tmp);
e2592fad 9198 tmp = tcg_temp_new_i32();
6ce2faf4 9199 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9200 } else { /* tbb */
7d1b0095 9201 tcg_temp_free_i32(tmp);
e2592fad 9202 tmp = tcg_temp_new_i32();
6ce2faf4 9203 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9204 }
7d1b0095 9205 tcg_temp_free_i32(addr);
b0109805
PB
9206 tcg_gen_shli_i32(tmp, tmp, 1);
9207 tcg_gen_addi_i32(tmp, tmp, s->pc);
9208 store_reg(s, 15, tmp);
9ee6e8bb 9209 } else {
2359bf80 9210 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9211 op = (insn >> 4) & 0x3;
2359bf80
MR
9212 switch (op2) {
9213 case 0:
426f5abc 9214 goto illegal_op;
2359bf80
MR
9215 case 1:
9216 /* Load/store exclusive byte/halfword/doubleword */
9217 if (op == 2) {
9218 goto illegal_op;
9219 }
9220 ARCH(7);
9221 break;
9222 case 2:
9223 /* Load-acquire/store-release */
9224 if (op == 3) {
9225 goto illegal_op;
9226 }
9227 /* Fall through */
9228 case 3:
9229 /* Load-acquire/store-release exclusive */
9230 ARCH(8);
9231 break;
426f5abc 9232 }
39d5492a 9233 addr = tcg_temp_local_new_i32();
98a46317 9234 load_reg_var(s, addr, rn);
2359bf80
MR
9235 if (!(op2 & 1)) {
9236 if (insn & (1 << 20)) {
9237 tmp = tcg_temp_new_i32();
9238 switch (op) {
9239 case 0: /* ldab */
6ce2faf4 9240 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9241 break;
9242 case 1: /* ldah */
6ce2faf4 9243 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9244 break;
9245 case 2: /* lda */
6ce2faf4 9246 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9247 break;
9248 default:
9249 abort();
9250 }
9251 store_reg(s, rs, tmp);
9252 } else {
9253 tmp = load_reg(s, rs);
9254 switch (op) {
9255 case 0: /* stlb */
6ce2faf4 9256 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9257 break;
9258 case 1: /* stlh */
6ce2faf4 9259 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9260 break;
9261 case 2: /* stl */
6ce2faf4 9262 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9263 break;
9264 default:
9265 abort();
9266 }
9267 tcg_temp_free_i32(tmp);
9268 }
9269 } else if (insn & (1 << 20)) {
426f5abc 9270 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9271 } else {
426f5abc 9272 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9273 }
39d5492a 9274 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9275 }
9276 } else {
9277 /* Load/store multiple, RFE, SRS. */
9278 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
9279 /* RFE, SRS: not available in user mode or on M profile */
9280 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 9281 goto illegal_op;
00115976 9282 }
9ee6e8bb
PB
9283 if (insn & (1 << 20)) {
9284 /* rfe */
b0109805
PB
9285 addr = load_reg(s, rn);
9286 if ((insn & (1 << 24)) == 0)
9287 tcg_gen_addi_i32(addr, addr, -8);
9288 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9289 tmp = tcg_temp_new_i32();
6ce2faf4 9290 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9291 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9292 tmp2 = tcg_temp_new_i32();
6ce2faf4 9293 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9294 if (insn & (1 << 21)) {
9295 /* Base writeback. */
b0109805
PB
9296 if (insn & (1 << 24)) {
9297 tcg_gen_addi_i32(addr, addr, 4);
9298 } else {
9299 tcg_gen_addi_i32(addr, addr, -4);
9300 }
9301 store_reg(s, rn, addr);
9302 } else {
7d1b0095 9303 tcg_temp_free_i32(addr);
9ee6e8bb 9304 }
b0109805 9305 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9306 } else {
9307 /* srs */
81465888
PM
9308 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9309 insn & (1 << 21));
9ee6e8bb
PB
9310 }
9311 } else {
5856d44e 9312 int i, loaded_base = 0;
39d5492a 9313 TCGv_i32 loaded_var;
9ee6e8bb 9314 /* Load/store multiple. */
b0109805 9315 addr = load_reg(s, rn);
9ee6e8bb
PB
9316 offset = 0;
9317 for (i = 0; i < 16; i++) {
9318 if (insn & (1 << i))
9319 offset += 4;
9320 }
9321 if (insn & (1 << 24)) {
b0109805 9322 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9323 }
9324
39d5492a 9325 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9326 for (i = 0; i < 16; i++) {
9327 if ((insn & (1 << i)) == 0)
9328 continue;
9329 if (insn & (1 << 20)) {
9330 /* Load. */
e2592fad 9331 tmp = tcg_temp_new_i32();
6ce2faf4 9332 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9333 if (i == 15) {
b0109805 9334 gen_bx(s, tmp);
5856d44e
YO
9335 } else if (i == rn) {
9336 loaded_var = tmp;
9337 loaded_base = 1;
9ee6e8bb 9338 } else {
b0109805 9339 store_reg(s, i, tmp);
9ee6e8bb
PB
9340 }
9341 } else {
9342 /* Store. */
b0109805 9343 tmp = load_reg(s, i);
6ce2faf4 9344 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9345 tcg_temp_free_i32(tmp);
9ee6e8bb 9346 }
b0109805 9347 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9348 }
5856d44e
YO
9349 if (loaded_base) {
9350 store_reg(s, rn, loaded_var);
9351 }
9ee6e8bb
PB
9352 if (insn & (1 << 21)) {
9353 /* Base register writeback. */
9354 if (insn & (1 << 24)) {
b0109805 9355 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9356 }
9357 /* Fault if writeback register is in register list. */
9358 if (insn & (1 << rn))
9359 goto illegal_op;
b0109805
PB
9360 store_reg(s, rn, addr);
9361 } else {
7d1b0095 9362 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9363 }
9364 }
9365 }
9366 break;
2af9ab77
JB
9367 case 5:
9368
9ee6e8bb 9369 op = (insn >> 21) & 0xf;
2af9ab77
JB
9370 if (op == 6) {
9371 /* Halfword pack. */
9372 tmp = load_reg(s, rn);
9373 tmp2 = load_reg(s, rm);
9374 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9375 if (insn & (1 << 5)) {
9376 /* pkhtb */
9377 if (shift == 0)
9378 shift = 31;
9379 tcg_gen_sari_i32(tmp2, tmp2, shift);
9380 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9381 tcg_gen_ext16u_i32(tmp2, tmp2);
9382 } else {
9383 /* pkhbt */
9384 if (shift)
9385 tcg_gen_shli_i32(tmp2, tmp2, shift);
9386 tcg_gen_ext16u_i32(tmp, tmp);
9387 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9388 }
9389 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9390 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9391 store_reg(s, rd, tmp);
9392 } else {
2af9ab77
JB
9393 /* Data processing register constant shift. */
9394 if (rn == 15) {
7d1b0095 9395 tmp = tcg_temp_new_i32();
2af9ab77
JB
9396 tcg_gen_movi_i32(tmp, 0);
9397 } else {
9398 tmp = load_reg(s, rn);
9399 }
9400 tmp2 = load_reg(s, rm);
9401
9402 shiftop = (insn >> 4) & 3;
9403 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9404 conds = (insn & (1 << 20)) != 0;
9405 logic_cc = (conds && thumb2_logic_op(op));
9406 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9407 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9408 goto illegal_op;
7d1b0095 9409 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9410 if (rd != 15) {
9411 store_reg(s, rd, tmp);
9412 } else {
7d1b0095 9413 tcg_temp_free_i32(tmp);
2af9ab77 9414 }
3174f8e9 9415 }
9ee6e8bb
PB
9416 break;
9417 case 13: /* Misc data processing. */
9418 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9419 if (op < 4 && (insn & 0xf000) != 0xf000)
9420 goto illegal_op;
9421 switch (op) {
9422 case 0: /* Register controlled shift. */
8984bd2e
PB
9423 tmp = load_reg(s, rn);
9424 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9425 if ((insn & 0x70) != 0)
9426 goto illegal_op;
9427 op = (insn >> 21) & 3;
8984bd2e
PB
9428 logic_cc = (insn & (1 << 20)) != 0;
9429 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9430 if (logic_cc)
9431 gen_logic_CC(tmp);
21aeb343 9432 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
9433 break;
9434 case 1: /* Sign/zero extend. */
5e3f878a 9435 tmp = load_reg(s, rm);
9ee6e8bb 9436 shift = (insn >> 4) & 3;
1301f322 9437 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9438 rotate, a shift is sufficient. */
9439 if (shift != 0)
f669df27 9440 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9441 op = (insn >> 20) & 7;
9442 switch (op) {
5e3f878a
PB
9443 case 0: gen_sxth(tmp); break;
9444 case 1: gen_uxth(tmp); break;
9445 case 2: gen_sxtb16(tmp); break;
9446 case 3: gen_uxtb16(tmp); break;
9447 case 4: gen_sxtb(tmp); break;
9448 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9449 default: goto illegal_op;
9450 }
9451 if (rn != 15) {
5e3f878a 9452 tmp2 = load_reg(s, rn);
9ee6e8bb 9453 if ((op >> 1) == 1) {
5e3f878a 9454 gen_add16(tmp, tmp2);
9ee6e8bb 9455 } else {
5e3f878a 9456 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9457 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9458 }
9459 }
5e3f878a 9460 store_reg(s, rd, tmp);
9ee6e8bb
PB
9461 break;
9462 case 2: /* SIMD add/subtract. */
9463 op = (insn >> 20) & 7;
9464 shift = (insn >> 4) & 7;
9465 if ((op & 3) == 3 || (shift & 3) == 3)
9466 goto illegal_op;
6ddbc6e4
PB
9467 tmp = load_reg(s, rn);
9468 tmp2 = load_reg(s, rm);
9469 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9470 tcg_temp_free_i32(tmp2);
6ddbc6e4 9471 store_reg(s, rd, tmp);
9ee6e8bb
PB
9472 break;
9473 case 3: /* Other data processing. */
9474 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9475 if (op < 4) {
9476 /* Saturating add/subtract. */
d9ba4830
PB
9477 tmp = load_reg(s, rn);
9478 tmp2 = load_reg(s, rm);
9ee6e8bb 9479 if (op & 1)
9ef39277 9480 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9481 if (op & 2)
9ef39277 9482 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9483 else
9ef39277 9484 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9485 tcg_temp_free_i32(tmp2);
9ee6e8bb 9486 } else {
d9ba4830 9487 tmp = load_reg(s, rn);
9ee6e8bb
PB
9488 switch (op) {
9489 case 0x0a: /* rbit */
d9ba4830 9490 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9491 break;
9492 case 0x08: /* rev */
66896cb8 9493 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9494 break;
9495 case 0x09: /* rev16 */
d9ba4830 9496 gen_rev16(tmp);
9ee6e8bb
PB
9497 break;
9498 case 0x0b: /* revsh */
d9ba4830 9499 gen_revsh(tmp);
9ee6e8bb
PB
9500 break;
9501 case 0x10: /* sel */
d9ba4830 9502 tmp2 = load_reg(s, rm);
7d1b0095 9503 tmp3 = tcg_temp_new_i32();
0ecb72a5 9504 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9505 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9506 tcg_temp_free_i32(tmp3);
9507 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9508 break;
9509 case 0x18: /* clz */
d9ba4830 9510 gen_helper_clz(tmp, tmp);
9ee6e8bb 9511 break;
eb0ecd5a
WN
9512 case 0x20:
9513 case 0x21:
9514 case 0x22:
9515 case 0x28:
9516 case 0x29:
9517 case 0x2a:
9518 {
9519 /* crc32/crc32c */
9520 uint32_t sz = op & 0x3;
9521 uint32_t c = op & 0x8;
9522
9523 if (!arm_feature(env, ARM_FEATURE_CRC)) {
9524 goto illegal_op;
9525 }
9526
9527 tmp2 = load_reg(s, rm);
aa633469
PM
9528 if (sz == 0) {
9529 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9530 } else if (sz == 1) {
9531 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9532 }
eb0ecd5a
WN
9533 tmp3 = tcg_const_i32(1 << sz);
9534 if (c) {
9535 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9536 } else {
9537 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9538 }
9539 tcg_temp_free_i32(tmp2);
9540 tcg_temp_free_i32(tmp3);
9541 break;
9542 }
9ee6e8bb
PB
9543 default:
9544 goto illegal_op;
9545 }
9546 }
d9ba4830 9547 store_reg(s, rd, tmp);
9ee6e8bb
PB
9548 break;
9549 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9550 op = (insn >> 4) & 0xf;
d9ba4830
PB
9551 tmp = load_reg(s, rn);
9552 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9553 switch ((insn >> 20) & 7) {
9554 case 0: /* 32 x 32 -> 32 */
d9ba4830 9555 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9556 tcg_temp_free_i32(tmp2);
9ee6e8bb 9557 if (rs != 15) {
d9ba4830 9558 tmp2 = load_reg(s, rs);
9ee6e8bb 9559 if (op)
d9ba4830 9560 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9561 else
d9ba4830 9562 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9563 tcg_temp_free_i32(tmp2);
9ee6e8bb 9564 }
9ee6e8bb
PB
9565 break;
9566 case 1: /* 16 x 16 -> 32 */
d9ba4830 9567 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9568 tcg_temp_free_i32(tmp2);
9ee6e8bb 9569 if (rs != 15) {
d9ba4830 9570 tmp2 = load_reg(s, rs);
9ef39277 9571 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9572 tcg_temp_free_i32(tmp2);
9ee6e8bb 9573 }
9ee6e8bb
PB
9574 break;
9575 case 2: /* Dual multiply add. */
9576 case 4: /* Dual multiply subtract. */
9577 if (op)
d9ba4830
PB
9578 gen_swap_half(tmp2);
9579 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9580 if (insn & (1 << 22)) {
e1d177b9 9581 /* This subtraction cannot overflow. */
d9ba4830 9582 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9583 } else {
e1d177b9
PM
9584 /* This addition cannot overflow 32 bits;
9585 * however it may overflow considered as a signed
9586 * operation, in which case we must set the Q flag.
9587 */
9ef39277 9588 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9589 }
7d1b0095 9590 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9591 if (rs != 15)
9592 {
d9ba4830 9593 tmp2 = load_reg(s, rs);
9ef39277 9594 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9595 tcg_temp_free_i32(tmp2);
9ee6e8bb 9596 }
9ee6e8bb
PB
9597 break;
9598 case 3: /* 32 * 16 -> 32msb */
9599 if (op)
d9ba4830 9600 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9601 else
d9ba4830 9602 gen_sxth(tmp2);
a7812ae4
PB
9603 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9604 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9605 tmp = tcg_temp_new_i32();
a7812ae4 9606 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9607 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9608 if (rs != 15)
9609 {
d9ba4830 9610 tmp2 = load_reg(s, rs);
9ef39277 9611 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9612 tcg_temp_free_i32(tmp2);
9ee6e8bb 9613 }
9ee6e8bb 9614 break;
838fa72d
AJ
9615 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9616 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9617 if (rs != 15) {
838fa72d
AJ
9618 tmp = load_reg(s, rs);
9619 if (insn & (1 << 20)) {
9620 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9621 } else {
838fa72d 9622 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9623 }
2c0262af 9624 }
838fa72d
AJ
9625 if (insn & (1 << 4)) {
9626 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9627 }
9628 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9629 tmp = tcg_temp_new_i32();
838fa72d
AJ
9630 tcg_gen_trunc_i64_i32(tmp, tmp64);
9631 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9632 break;
9633 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9634 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9635 tcg_temp_free_i32(tmp2);
9ee6e8bb 9636 if (rs != 15) {
d9ba4830
PB
9637 tmp2 = load_reg(s, rs);
9638 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9639 tcg_temp_free_i32(tmp2);
5fd46862 9640 }
9ee6e8bb 9641 break;
2c0262af 9642 }
d9ba4830 9643 store_reg(s, rd, tmp);
2c0262af 9644 break;
9ee6e8bb
PB
9645 case 6: case 7: /* 64-bit multiply, Divide. */
9646 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9647 tmp = load_reg(s, rn);
9648 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9649 if ((op & 0x50) == 0x10) {
9650 /* sdiv, udiv */
47789990 9651 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9652 goto illegal_op;
47789990 9653 }
9ee6e8bb 9654 if (op & 0x20)
5e3f878a 9655 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9656 else
5e3f878a 9657 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9658 tcg_temp_free_i32(tmp2);
5e3f878a 9659 store_reg(s, rd, tmp);
9ee6e8bb
PB
9660 } else if ((op & 0xe) == 0xc) {
9661 /* Dual multiply accumulate long. */
9662 if (op & 1)
5e3f878a
PB
9663 gen_swap_half(tmp2);
9664 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9665 if (op & 0x10) {
5e3f878a 9666 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9667 } else {
5e3f878a 9668 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9669 }
7d1b0095 9670 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9671 /* BUGFIX */
9672 tmp64 = tcg_temp_new_i64();
9673 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9674 tcg_temp_free_i32(tmp);
a7812ae4
PB
9675 gen_addq(s, tmp64, rs, rd);
9676 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9677 tcg_temp_free_i64(tmp64);
2c0262af 9678 } else {
9ee6e8bb
PB
9679 if (op & 0x20) {
9680 /* Unsigned 64-bit multiply */
a7812ae4 9681 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9682 } else {
9ee6e8bb
PB
9683 if (op & 8) {
9684 /* smlalxy */
5e3f878a 9685 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9686 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9687 tmp64 = tcg_temp_new_i64();
9688 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9689 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9690 } else {
9691 /* Signed 64-bit multiply */
a7812ae4 9692 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9693 }
b5ff1b31 9694 }
9ee6e8bb
PB
9695 if (op & 4) {
9696 /* umaal */
a7812ae4
PB
9697 gen_addq_lo(s, tmp64, rs);
9698 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9699 } else if (op & 0x40) {
9700 /* 64-bit accumulate. */
a7812ae4 9701 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9702 }
a7812ae4 9703 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9704 tcg_temp_free_i64(tmp64);
5fd46862 9705 }
2c0262af 9706 break;
9ee6e8bb
PB
9707 }
9708 break;
9709 case 6: case 7: case 14: case 15:
9710 /* Coprocessor. */
9711 if (((insn >> 24) & 3) == 3) {
9712 /* Translate into the equivalent ARM encoding. */
f06053e3 9713 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
9714 if (disas_neon_data_insn(env, s, insn))
9715 goto illegal_op;
6a57f3eb
WN
9716 } else if (((insn >> 8) & 0xe) == 10) {
9717 if (disas_vfp_insn(env, s, insn)) {
9718 goto illegal_op;
9719 }
9ee6e8bb
PB
9720 } else {
9721 if (insn & (1 << 28))
9722 goto illegal_op;
9723 if (disas_coproc_insn (env, s, insn))
9724 goto illegal_op;
9725 }
9726 break;
9727 case 8: case 9: case 10: case 11:
9728 if (insn & (1 << 15)) {
9729 /* Branches, misc control. */
9730 if (insn & 0x5000) {
9731 /* Unconditional branch. */
9732 /* signextend(hw1[10:0]) -> offset[:12]. */
9733 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9734 /* hw1[10:0] -> offset[11:1]. */
9735 offset |= (insn & 0x7ff) << 1;
9736 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9737 offset[24:22] already have the same value because of the
9738 sign extension above. */
9739 offset ^= ((~insn) & (1 << 13)) << 10;
9740 offset ^= ((~insn) & (1 << 11)) << 11;
9741
9ee6e8bb
PB
9742 if (insn & (1 << 14)) {
9743 /* Branch and link. */
3174f8e9 9744 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9745 }
3b46e624 9746
b0109805 9747 offset += s->pc;
9ee6e8bb
PB
9748 if (insn & (1 << 12)) {
9749 /* b/bl */
b0109805 9750 gen_jmp(s, offset);
9ee6e8bb
PB
9751 } else {
9752 /* blx */
b0109805 9753 offset &= ~(uint32_t)2;
be5e7a76 9754 /* thumb2 bx, no need to check */
b0109805 9755 gen_bx_im(s, offset);
2c0262af 9756 }
9ee6e8bb
PB
9757 } else if (((insn >> 23) & 7) == 7) {
9758 /* Misc control */
9759 if (insn & (1 << 13))
9760 goto illegal_op;
9761
9762 if (insn & (1 << 26)) {
37e6456e
PM
9763 if (!(insn & (1 << 20))) {
9764 /* Hypervisor call (v7) */
9765 int imm16 = extract32(insn, 16, 4) << 12
9766 | extract32(insn, 0, 12);
9767 ARCH(7);
9768 if (IS_USER(s)) {
9769 goto illegal_op;
9770 }
9771 gen_hvc(s, imm16);
9772 } else {
9773 /* Secure monitor call (v6+) */
9774 ARCH(6K);
9775 if (IS_USER(s)) {
9776 goto illegal_op;
9777 }
9778 gen_smc(s);
9779 }
2c0262af 9780 } else {
9ee6e8bb
PB
9781 op = (insn >> 20) & 7;
9782 switch (op) {
9783 case 0: /* msr cpsr. */
9784 if (IS_M(env)) {
8984bd2e
PB
9785 tmp = load_reg(s, rn);
9786 addr = tcg_const_i32(insn & 0xff);
9787 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9788 tcg_temp_free_i32(addr);
7d1b0095 9789 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9790 gen_lookup_tb(s);
9791 break;
9792 }
9793 /* fall through */
9794 case 1: /* msr spsr. */
9795 if (IS_M(env))
9796 goto illegal_op;
2fbac54b
FN
9797 tmp = load_reg(s, rn);
9798 if (gen_set_psr(s,
9ee6e8bb 9799 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9800 op == 1, tmp))
9ee6e8bb
PB
9801 goto illegal_op;
9802 break;
9803 case 2: /* cps, nop-hint. */
9804 if (((insn >> 8) & 7) == 0) {
9805 gen_nop_hint(s, insn & 0xff);
9806 }
9807 /* Implemented as NOP in user mode. */
9808 if (IS_USER(s))
9809 break;
9810 offset = 0;
9811 imm = 0;
9812 if (insn & (1 << 10)) {
9813 if (insn & (1 << 7))
9814 offset |= CPSR_A;
9815 if (insn & (1 << 6))
9816 offset |= CPSR_I;
9817 if (insn & (1 << 5))
9818 offset |= CPSR_F;
9819 if (insn & (1 << 9))
9820 imm = CPSR_A | CPSR_I | CPSR_F;
9821 }
9822 if (insn & (1 << 8)) {
9823 offset |= 0x1f;
9824 imm |= (insn & 0x1f);
9825 }
9826 if (offset) {
2fbac54b 9827 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9828 }
9829 break;
9830 case 3: /* Special control operations. */
426f5abc 9831 ARCH(7);
9ee6e8bb
PB
9832 op = (insn >> 4) & 0xf;
9833 switch (op) {
9834 case 2: /* clrex */
426f5abc 9835 gen_clrex(s);
9ee6e8bb
PB
9836 break;
9837 case 4: /* dsb */
9838 case 5: /* dmb */
9839 case 6: /* isb */
9840 /* These execute as NOPs. */
9ee6e8bb
PB
9841 break;
9842 default:
9843 goto illegal_op;
9844 }
9845 break;
9846 case 4: /* bxj */
9847 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9848 tmp = load_reg(s, rn);
9849 gen_bx(s, tmp);
9ee6e8bb
PB
9850 break;
9851 case 5: /* Exception return. */
b8b45b68
RV
9852 if (IS_USER(s)) {
9853 goto illegal_op;
9854 }
9855 if (rn != 14 || rd != 15) {
9856 goto illegal_op;
9857 }
9858 tmp = load_reg(s, rn);
9859 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9860 gen_exception_return(s, tmp);
9861 break;
9ee6e8bb 9862 case 6: /* mrs cpsr. */
7d1b0095 9863 tmp = tcg_temp_new_i32();
9ee6e8bb 9864 if (IS_M(env)) {
8984bd2e
PB
9865 addr = tcg_const_i32(insn & 0xff);
9866 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9867 tcg_temp_free_i32(addr);
9ee6e8bb 9868 } else {
9ef39277 9869 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9870 }
8984bd2e 9871 store_reg(s, rd, tmp);
9ee6e8bb
PB
9872 break;
9873 case 7: /* mrs spsr. */
9874 /* Not accessible in user mode. */
9875 if (IS_USER(s) || IS_M(env))
9876 goto illegal_op;
d9ba4830
PB
9877 tmp = load_cpu_field(spsr);
9878 store_reg(s, rd, tmp);
9ee6e8bb 9879 break;
2c0262af
FB
9880 }
9881 }
9ee6e8bb
PB
9882 } else {
9883 /* Conditional branch. */
9884 op = (insn >> 22) & 0xf;
9885 /* Generate a conditional jump to next instruction. */
9886 s->condlabel = gen_new_label();
39fb730a 9887 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9888 s->condjmp = 1;
9889
9890 /* offset[11:1] = insn[10:0] */
9891 offset = (insn & 0x7ff) << 1;
9892 /* offset[17:12] = insn[21:16]. */
9893 offset |= (insn & 0x003f0000) >> 4;
9894 /* offset[31:20] = insn[26]. */
9895 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9896 /* offset[18] = insn[13]. */
9897 offset |= (insn & (1 << 13)) << 5;
9898 /* offset[19] = insn[11]. */
9899 offset |= (insn & (1 << 11)) << 8;
9900
9901 /* jump to the offset */
b0109805 9902 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9903 }
9904 } else {
9905 /* Data processing immediate. */
9906 if (insn & (1 << 25)) {
9907 if (insn & (1 << 24)) {
9908 if (insn & (1 << 20))
9909 goto illegal_op;
9910 /* Bitfield/Saturate. */
9911 op = (insn >> 21) & 7;
9912 imm = insn & 0x1f;
9913 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9914 if (rn == 15) {
7d1b0095 9915 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9916 tcg_gen_movi_i32(tmp, 0);
9917 } else {
9918 tmp = load_reg(s, rn);
9919 }
9ee6e8bb
PB
9920 switch (op) {
9921 case 2: /* Signed bitfield extract. */
9922 imm++;
9923 if (shift + imm > 32)
9924 goto illegal_op;
9925 if (imm < 32)
6ddbc6e4 9926 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9927 break;
9928 case 6: /* Unsigned bitfield extract. */
9929 imm++;
9930 if (shift + imm > 32)
9931 goto illegal_op;
9932 if (imm < 32)
6ddbc6e4 9933 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9934 break;
9935 case 3: /* Bitfield insert/clear. */
9936 if (imm < shift)
9937 goto illegal_op;
9938 imm = imm + 1 - shift;
9939 if (imm != 32) {
6ddbc6e4 9940 tmp2 = load_reg(s, rd);
d593c48e 9941 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 9942 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9943 }
9944 break;
9945 case 7:
9946 goto illegal_op;
9947 default: /* Saturate. */
9ee6e8bb
PB
9948 if (shift) {
9949 if (op & 1)
6ddbc6e4 9950 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9951 else
6ddbc6e4 9952 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 9953 }
6ddbc6e4 9954 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
9955 if (op & 4) {
9956 /* Unsigned. */
9ee6e8bb 9957 if ((op & 1) && shift == 0)
9ef39277 9958 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9959 else
9ef39277 9960 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 9961 } else {
9ee6e8bb 9962 /* Signed. */
9ee6e8bb 9963 if ((op & 1) && shift == 0)
9ef39277 9964 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9965 else
9ef39277 9966 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9967 }
b75263d6 9968 tcg_temp_free_i32(tmp2);
9ee6e8bb 9969 break;
2c0262af 9970 }
6ddbc6e4 9971 store_reg(s, rd, tmp);
9ee6e8bb
PB
9972 } else {
9973 imm = ((insn & 0x04000000) >> 15)
9974 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9975 if (insn & (1 << 22)) {
9976 /* 16-bit immediate. */
9977 imm |= (insn >> 4) & 0xf000;
9978 if (insn & (1 << 23)) {
9979 /* movt */
5e3f878a 9980 tmp = load_reg(s, rd);
86831435 9981 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9982 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9983 } else {
9ee6e8bb 9984 /* movw */
7d1b0095 9985 tmp = tcg_temp_new_i32();
5e3f878a 9986 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9987 }
9988 } else {
9ee6e8bb
PB
9989 /* Add/sub 12-bit immediate. */
9990 if (rn == 15) {
b0109805 9991 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9992 if (insn & (1 << 23))
b0109805 9993 offset -= imm;
9ee6e8bb 9994 else
b0109805 9995 offset += imm;
7d1b0095 9996 tmp = tcg_temp_new_i32();
5e3f878a 9997 tcg_gen_movi_i32(tmp, offset);
2c0262af 9998 } else {
5e3f878a 9999 tmp = load_reg(s, rn);
9ee6e8bb 10000 if (insn & (1 << 23))
5e3f878a 10001 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10002 else
5e3f878a 10003 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10004 }
9ee6e8bb 10005 }
5e3f878a 10006 store_reg(s, rd, tmp);
191abaa2 10007 }
9ee6e8bb
PB
10008 } else {
10009 int shifter_out = 0;
10010 /* modified 12-bit immediate. */
10011 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10012 imm = (insn & 0xff);
10013 switch (shift) {
10014 case 0: /* XY */
10015 /* Nothing to do. */
10016 break;
10017 case 1: /* 00XY00XY */
10018 imm |= imm << 16;
10019 break;
10020 case 2: /* XY00XY00 */
10021 imm |= imm << 16;
10022 imm <<= 8;
10023 break;
10024 case 3: /* XYXYXYXY */
10025 imm |= imm << 16;
10026 imm |= imm << 8;
10027 break;
10028 default: /* Rotated constant. */
10029 shift = (shift << 1) | (imm >> 7);
10030 imm |= 0x80;
10031 imm = imm << (32 - shift);
10032 shifter_out = 1;
10033 break;
b5ff1b31 10034 }
7d1b0095 10035 tmp2 = tcg_temp_new_i32();
3174f8e9 10036 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10037 rn = (insn >> 16) & 0xf;
3174f8e9 10038 if (rn == 15) {
7d1b0095 10039 tmp = tcg_temp_new_i32();
3174f8e9
FN
10040 tcg_gen_movi_i32(tmp, 0);
10041 } else {
10042 tmp = load_reg(s, rn);
10043 }
9ee6e8bb
PB
10044 op = (insn >> 21) & 0xf;
10045 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10046 shifter_out, tmp, tmp2))
9ee6e8bb 10047 goto illegal_op;
7d1b0095 10048 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10049 rd = (insn >> 8) & 0xf;
10050 if (rd != 15) {
3174f8e9
FN
10051 store_reg(s, rd, tmp);
10052 } else {
7d1b0095 10053 tcg_temp_free_i32(tmp);
2c0262af 10054 }
2c0262af 10055 }
9ee6e8bb
PB
10056 }
10057 break;
10058 case 12: /* Load/store single data item. */
10059 {
10060 int postinc = 0;
10061 int writeback = 0;
a99caa48 10062 int memidx;
9ee6e8bb
PB
10063 if ((insn & 0x01100000) == 0x01000000) {
10064 if (disas_neon_ls_insn(env, s, insn))
c1713132 10065 goto illegal_op;
9ee6e8bb
PB
10066 break;
10067 }
a2fdc890
PM
10068 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10069 if (rs == 15) {
10070 if (!(insn & (1 << 20))) {
10071 goto illegal_op;
10072 }
10073 if (op != 2) {
10074 /* Byte or halfword load space with dest == r15 : memory hints.
10075 * Catch them early so we don't emit pointless addressing code.
10076 * This space is a mix of:
10077 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10078 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10079 * cores)
10080 * unallocated hints, which must be treated as NOPs
10081 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10082 * which is easiest for the decoding logic
10083 * Some space which must UNDEF
10084 */
10085 int op1 = (insn >> 23) & 3;
10086 int op2 = (insn >> 6) & 0x3f;
10087 if (op & 2) {
10088 goto illegal_op;
10089 }
10090 if (rn == 15) {
02afbf64
PM
10091 /* UNPREDICTABLE, unallocated hint or
10092 * PLD/PLDW/PLI (literal)
10093 */
a2fdc890
PM
10094 return 0;
10095 }
10096 if (op1 & 1) {
02afbf64 10097 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10098 }
10099 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10100 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10101 }
10102 /* UNDEF space, or an UNPREDICTABLE */
10103 return 1;
10104 }
10105 }
a99caa48 10106 memidx = get_mem_index(s);
9ee6e8bb 10107 if (rn == 15) {
7d1b0095 10108 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10109 /* PC relative. */
10110 /* s->pc has already been incremented by 4. */
10111 imm = s->pc & 0xfffffffc;
10112 if (insn & (1 << 23))
10113 imm += insn & 0xfff;
10114 else
10115 imm -= insn & 0xfff;
b0109805 10116 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10117 } else {
b0109805 10118 addr = load_reg(s, rn);
9ee6e8bb
PB
10119 if (insn & (1 << 23)) {
10120 /* Positive offset. */
10121 imm = insn & 0xfff;
b0109805 10122 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10123 } else {
9ee6e8bb 10124 imm = insn & 0xff;
2a0308c5
PM
10125 switch ((insn >> 8) & 0xf) {
10126 case 0x0: /* Shifted Register. */
9ee6e8bb 10127 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10128 if (shift > 3) {
10129 tcg_temp_free_i32(addr);
18c9b560 10130 goto illegal_op;
2a0308c5 10131 }
b26eefb6 10132 tmp = load_reg(s, rm);
9ee6e8bb 10133 if (shift)
b26eefb6 10134 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10135 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10136 tcg_temp_free_i32(tmp);
9ee6e8bb 10137 break;
2a0308c5 10138 case 0xc: /* Negative offset. */
b0109805 10139 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10140 break;
2a0308c5 10141 case 0xe: /* User privilege. */
b0109805 10142 tcg_gen_addi_i32(addr, addr, imm);
a99caa48 10143 memidx = MMU_USER_IDX;
9ee6e8bb 10144 break;
2a0308c5 10145 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10146 imm = -imm;
10147 /* Fall through. */
2a0308c5 10148 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10149 postinc = 1;
10150 writeback = 1;
10151 break;
2a0308c5 10152 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10153 imm = -imm;
10154 /* Fall through. */
2a0308c5 10155 case 0xf: /* Pre-increment. */
b0109805 10156 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10157 writeback = 1;
10158 break;
10159 default:
2a0308c5 10160 tcg_temp_free_i32(addr);
b7bcbe95 10161 goto illegal_op;
9ee6e8bb
PB
10162 }
10163 }
10164 }
9ee6e8bb
PB
10165 if (insn & (1 << 20)) {
10166 /* Load. */
5a839c0d 10167 tmp = tcg_temp_new_i32();
a2fdc890 10168 switch (op) {
5a839c0d 10169 case 0:
a99caa48 10170 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10171 break;
10172 case 4:
a99caa48 10173 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10174 break;
10175 case 1:
a99caa48 10176 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10177 break;
10178 case 5:
a99caa48 10179 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10180 break;
10181 case 2:
a99caa48 10182 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10183 break;
2a0308c5 10184 default:
5a839c0d 10185 tcg_temp_free_i32(tmp);
2a0308c5
PM
10186 tcg_temp_free_i32(addr);
10187 goto illegal_op;
a2fdc890
PM
10188 }
10189 if (rs == 15) {
10190 gen_bx(s, tmp);
9ee6e8bb 10191 } else {
a2fdc890 10192 store_reg(s, rs, tmp);
9ee6e8bb
PB
10193 }
10194 } else {
10195 /* Store. */
b0109805 10196 tmp = load_reg(s, rs);
9ee6e8bb 10197 switch (op) {
5a839c0d 10198 case 0:
a99caa48 10199 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10200 break;
10201 case 1:
a99caa48 10202 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10203 break;
10204 case 2:
a99caa48 10205 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10206 break;
2a0308c5 10207 default:
5a839c0d 10208 tcg_temp_free_i32(tmp);
2a0308c5
PM
10209 tcg_temp_free_i32(addr);
10210 goto illegal_op;
b7bcbe95 10211 }
5a839c0d 10212 tcg_temp_free_i32(tmp);
2c0262af 10213 }
9ee6e8bb 10214 if (postinc)
b0109805
PB
10215 tcg_gen_addi_i32(addr, addr, imm);
10216 if (writeback) {
10217 store_reg(s, rn, addr);
10218 } else {
7d1b0095 10219 tcg_temp_free_i32(addr);
b0109805 10220 }
9ee6e8bb
PB
10221 }
10222 break;
10223 default:
10224 goto illegal_op;
2c0262af 10225 }
9ee6e8bb
PB
10226 return 0;
10227illegal_op:
10228 return 1;
2c0262af
FB
10229}
10230
/* Decode and translate one 16-bit Thumb instruction at s->pc.
 *
 * Emits TCG ops implementing the instruction, advances s->pc by 2, and
 * updates the DisasContext (condexec/IT state, condjmp labels, is_jmp).
 * 32-bit Thumb-2 encodings (top halfword 0b11101/0b11110/0b11111) are
 * delegated to disas_thumb2_insn().  Undefined encodings raise EXCP_UDEF
 * via the undef/undef32 labels at the bottom.
 *
 * NOTE(review): code below is reconstructed verbatim from a git-blame dump;
 * only comments have been added, no code tokens changed.
 */
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    /* Inside an IT block: emit a conditional skip around this insn unless
     * the condition is AL (0xe), which always executes. */
    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
            s->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;

    /* Dispatch on the top 4 bits of the halfword. */
    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract (register or 3-bit immediate) */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            /* Inside an IT block the insn does not set flags; outside it
             * these encodings are ADDS/SUBS. */
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate (LSL/LSR/ASR by 5-bit immediate) */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate: MOV/CMP/ADD/SUB rd, #imm8 */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp: flags only, result discarded */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx: operates on high regs
             * (rd gets bit 3 from insn bit 7). */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    /* BLX: save return address (with Thumb bit) in LR */
                    ARCH(5);
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register (ALU ops on low regs) */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;                /* val == 1: result lives in tmp2 */
        } else {
            val = 0;
        }

        if (op == 9) { /* neg: rsb from zero */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        /* Each op sets flags only outside an IT block; ops that produce
         * no register result (tst/cmp/cmn) force rd = 16 so the
         * write-back code below skips the store. */
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                /* rotate amount is taken modulo 32 */
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn: result is in tmp2, route write-back there */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        /* Write back result and free the non-result temp; rd == 16 means
         * flags-only ops, which discard both temps. */
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            break;
        case 1: /* strh */
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            break;
        case 2: /* strb */
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, get_mem_index(s));
            break;
        case 4: /* ldr */
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, get_mem_index(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset (imm5 scaled by 4) */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset (imm5 scaled by 2) */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack (SP-relative, imm8 scaled by 4) */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, get_mem_index(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg (ADR / ADD rd, SP, #imm) */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer (ADD/SUB SP, #imm7*4) */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop (bit 8 additionally includes LR on push / PC on pop) */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            /* offset = total byte size of the transferred register list */
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: pre-decrement SP to the lowest address first */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: undo the per-register increments so addr is the
                 * final (decremented) SP value */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                store_reg_from_load(env, s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            /* CBZ/CBNZ: compare and branch on (non-)zero; always forward,
             * never inside an IT block, so no condexec interaction. */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
        {
            int imm8 = extract32(insn, 0, 8);
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
            break;
        }

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;   /* encoding 2 is reserved */
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    /* CPS is a no-op in user mode */
                    break;
                }
                if (IS_M(env)) {
                    /* M-profile: write PRIMASK/FAULTMASK via the v7M
                     * special-register helper */
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    /* A-profile: set/clear CPSR.{A,I,F} */
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple (LDMIA/STMIA, always increment-after) */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                    /* defer writing the base register so a fault mid-list
                     * leaves it intact */
                    if (i == rn) {
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, get_mem_index(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;     /* cond 0xe is the permanently-undefined encoding */

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s, s->pc);
            s->svc_imm = extract32(insn, 0, 8);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        arm_gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset (imm8 sign-extended, scaled by 2) */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            /* second half of a BL/BLX pair -> 32-bit decoder */
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch (imm11 sign-extended, scaled by 2) */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* 32-bit Thumb-2 instruction */
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    /* undefined 32-bit encoding: report a 4-byte-wide UDEF */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
    return;
illegal_op:
undef:
    /* undefined 16-bit encoding */
    gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
}
10957
2c0262af
FB
10958/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10959 basic block 'tb'. If search_pc is TRUE, also generate PC
10960 information for each intermediate instruction. */
5639c3f2 10961static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10962 TranslationBlock *tb,
5639c3f2 10963 bool search_pc)
2c0262af 10964{
ed2803da 10965 CPUState *cs = CPU(cpu);
5639c3f2 10966 CPUARMState *env = &cpu->env;
2c0262af 10967 DisasContext dc1, *dc = &dc1;
a1d1bb31 10968 CPUBreakpoint *bp;
2c0262af
FB
10969 uint16_t *gen_opc_end;
10970 int j, lj;
0fa85d43 10971 target_ulong pc_start;
0a2461fa 10972 target_ulong next_page_start;
2e70f6ef
PB
10973 int num_insns;
10974 int max_insns;
3b46e624 10975
2c0262af 10976 /* generate intermediate code */
40f860cd
PM
10977
10978 /* The A64 decoder has its own top level loop, because it doesn't need
10979 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
10980 */
10981 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10982 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
10983 return;
10984 }
10985
0fa85d43 10986 pc_start = tb->pc;
3b46e624 10987
2c0262af
FB
10988 dc->tb = tb;
10989
92414b31 10990 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10991
10992 dc->is_jmp = DISAS_NEXT;
10993 dc->pc = pc_start;
ed2803da 10994 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10995 dc->condjmp = 0;
3926cc84 10996
40f860cd
PM
10997 dc->aarch64 = 0;
10998 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10999 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11000 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11001 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
3926cc84 11002#if !defined(CONFIG_USER_ONLY)
40f860cd 11003 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
3926cc84 11004#endif
2c7ffc41 11005 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
40f860cd
PM
11006 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11007 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11008 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11009 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
60322b39
PM
11010 dc->cp_regs = cpu->cp_regs;
11011 dc->current_pl = arm_current_pl(env);
a984e42c 11012 dc->features = env->features;
40f860cd 11013
50225ad0
PM
11014 /* Single step state. The code-generation logic here is:
11015 * SS_ACTIVE == 0:
11016 * generate code with no special handling for single-stepping (except
11017 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11018 * this happens anyway because those changes are all system register or
11019 * PSTATE writes).
11020 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11021 * emit code for one insn
11022 * emit code to clear PSTATE.SS
11023 * emit code to generate software step exception for completed step
11024 * end TB (as usual for having generated an exception)
11025 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11026 * emit code to generate a software step exception
11027 * end the TB
11028 */
11029 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11030 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11031 dc->is_ldex = false;
11032 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11033
a7812ae4
PB
11034 cpu_F0s = tcg_temp_new_i32();
11035 cpu_F1s = tcg_temp_new_i32();
11036 cpu_F0d = tcg_temp_new_i64();
11037 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11038 cpu_V0 = cpu_F0d;
11039 cpu_V1 = cpu_F1d;
e677137d 11040 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11041 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11042 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 11043 lj = -1;
2e70f6ef
PB
11044 num_insns = 0;
11045 max_insns = tb->cflags & CF_COUNT_MASK;
11046 if (max_insns == 0)
11047 max_insns = CF_COUNT_MASK;
11048
806f352d 11049 gen_tb_start();
e12ce78d 11050
3849902c
PM
11051 tcg_clear_temp_count();
11052
e12ce78d
PM
11053 /* A note on handling of the condexec (IT) bits:
11054 *
11055 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11056 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11057 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11058 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11059 * to do it at the end of the block. (For example if we don't do this
11060 * it's hard to identify whether we can safely skip writing condexec
11061 * at the end of the TB, which we definitely want to do for the case
11062 * where a TB doesn't do anything with the IT state at all.)
11063 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11064 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11065 * This is done both for leaving the TB at the end, and for leaving
11066 * it because of an exception we know will happen, which is done in
11067 * gen_exception_insn(). The latter is necessary because we need to
11068 * leave the TB with the PC/IT state just prior to execution of the
11069 * instruction which caused the exception.
11070 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11071 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
11072 * This is handled in the same way as restoration of the
11073 * PC in these situations: we will be called again with search_pc=1
11074 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
11075 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
11076 * this to restore the condexec bits.
e12ce78d
PM
11077 *
11078 * Note that there are no instructions which can read the condexec
11079 * bits, and none which can write non-static values to them, so
0ecb72a5 11080 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11081 * middle of a TB.
11082 */
11083
9ee6e8bb
PB
11084 /* Reset the conditional execution bits immediately. This avoids
11085 complications trying to do it at the end of the block. */
98eac7ca 11086 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11087 {
39d5492a 11088 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11089 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11090 store_cpu_field(tmp, condexec_bits);
8f01245e 11091 }
2c0262af 11092 do {
fbb4a2e3
PB
11093#ifdef CONFIG_USER_ONLY
11094 /* Intercept jump to the magic kernel page. */
40f860cd 11095 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11096 /* We always get here via a jump, so know we are not in a
11097 conditional execution block. */
d4a2dc67 11098 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
11099 dc->is_jmp = DISAS_UPDATE;
11100 break;
11101 }
11102#else
9ee6e8bb
PB
11103 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
11104 /* We always get here via a jump, so know we are not in a
11105 conditional execution block. */
d4a2dc67 11106 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
11107 dc->is_jmp = DISAS_UPDATE;
11108 break;
9ee6e8bb
PB
11109 }
11110#endif
11111
f0c3c505
AF
11112 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11113 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11114 if (bp->pc == dc->pc) {
d4a2dc67 11115 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
11116 /* Advance PC so that clearing the breakpoint will
11117 invalidate this TB. */
11118 dc->pc += 2;
11119 goto done_generating;
1fddef4b
FB
11120 }
11121 }
11122 }
2c0262af 11123 if (search_pc) {
92414b31 11124 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
11125 if (lj < j) {
11126 lj++;
11127 while (lj < j)
ab1103de 11128 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 11129 }
25983cad 11130 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 11131 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 11132 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 11133 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 11134 }
e50e6a20 11135
2e70f6ef
PB
11136 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11137 gen_io_start();
11138
fdefe51c 11139 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
11140 tcg_gen_debug_insn_start(dc->pc);
11141 }
11142
50225ad0
PM
11143 if (dc->ss_active && !dc->pstate_ss) {
11144 /* Singlestep state is Active-pending.
11145 * If we're in this state at the start of a TB then either
11146 * a) we just took an exception to an EL which is being debugged
11147 * and this is the first insn in the exception handler
11148 * b) debug exceptions were masked and we just unmasked them
11149 * without changing EL (eg by clearing PSTATE.D)
11150 * In either case we're going to take a swstep exception in the
11151 * "did not step an insn" case, and so the syndrome ISV and EX
11152 * bits should be zero.
11153 */
11154 assert(num_insns == 0);
11155 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
11156 goto done_generating;
11157 }
11158
40f860cd 11159 if (dc->thumb) {
9ee6e8bb
PB
11160 disas_thumb_insn(env, dc);
11161 if (dc->condexec_mask) {
11162 dc->condexec_cond = (dc->condexec_cond & 0xe)
11163 | ((dc->condexec_mask >> 4) & 1);
11164 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11165 if (dc->condexec_mask == 0) {
11166 dc->condexec_cond = 0;
11167 }
11168 }
11169 } else {
11170 disas_arm_insn(env, dc);
11171 }
e50e6a20
FB
11172
11173 if (dc->condjmp && !dc->is_jmp) {
11174 gen_set_label(dc->condlabel);
11175 dc->condjmp = 0;
11176 }
3849902c
PM
11177
11178 if (tcg_check_temp_count()) {
0a2461fa
AG
11179 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11180 dc->pc);
3849902c
PM
11181 }
11182
aaf2d97d 11183 /* Translation stops when a conditional branch is encountered.
e50e6a20 11184 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11185 * Also stop translation when a page boundary is reached. This
bf20dc07 11186 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11187 num_insns ++;
efd7f486 11188 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 11189 !cs->singlestep_enabled &&
1b530a6d 11190 !singlestep &&
50225ad0 11191 !dc->ss_active &&
2e70f6ef
PB
11192 dc->pc < next_page_start &&
11193 num_insns < max_insns);
11194
11195 if (tb->cflags & CF_LAST_IO) {
11196 if (dc->condjmp) {
11197 /* FIXME: This can theoretically happen with self-modifying
11198 code. */
a47dddd7 11199 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11200 }
11201 gen_io_end();
11202 }
9ee6e8bb 11203
b5ff1b31 11204 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11205 instruction was a conditional branch or trap, and the PC has
11206 already been written. */
50225ad0 11207 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
8aaca4c0 11208 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11209 if (dc->condjmp) {
9ee6e8bb
PB
11210 gen_set_condexec(dc);
11211 if (dc->is_jmp == DISAS_SWI) {
50225ad0 11212 gen_ss_advance(dc);
d4a2dc67 11213 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
37e6456e
PM
11214 } else if (dc->is_jmp == DISAS_HVC) {
11215 gen_ss_advance(dc);
11216 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11217 } else if (dc->is_jmp == DISAS_SMC) {
11218 gen_ss_advance(dc);
11219 gen_exception(EXCP_SMC, syn_aa32_smc());
50225ad0
PM
11220 } else if (dc->ss_active) {
11221 gen_step_complete_exception(dc);
9ee6e8bb 11222 } else {
d4a2dc67 11223 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11224 }
e50e6a20
FB
11225 gen_set_label(dc->condlabel);
11226 }
11227 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11228 gen_set_pc_im(dc, dc->pc);
e50e6a20 11229 dc->condjmp = 0;
8aaca4c0 11230 }
9ee6e8bb
PB
11231 gen_set_condexec(dc);
11232 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
50225ad0 11233 gen_ss_advance(dc);
d4a2dc67 11234 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
37e6456e
PM
11235 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11236 gen_ss_advance(dc);
11237 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11238 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11239 gen_ss_advance(dc);
11240 gen_exception(EXCP_SMC, syn_aa32_smc());
50225ad0
PM
11241 } else if (dc->ss_active) {
11242 gen_step_complete_exception(dc);
9ee6e8bb
PB
11243 } else {
11244 /* FIXME: Single stepping a WFI insn will not halt
11245 the CPU. */
d4a2dc67 11246 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11247 }
8aaca4c0 11248 } else {
9ee6e8bb
PB
11249 /* While branches must always occur at the end of an IT block,
11250 there are a few other things that can cause us to terminate
65626741 11251 the TB in the middle of an IT block:
9ee6e8bb
PB
11252 - Exception generating instructions (bkpt, swi, undefined).
11253 - Page boundaries.
11254 - Hardware watchpoints.
11255 Hardware breakpoints have already been handled and skip this code.
11256 */
11257 gen_set_condexec(dc);
8aaca4c0 11258 switch(dc->is_jmp) {
8aaca4c0 11259 case DISAS_NEXT:
6e256c93 11260 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11261 break;
11262 default:
11263 case DISAS_JUMP:
11264 case DISAS_UPDATE:
11265 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11266 tcg_gen_exit_tb(0);
8aaca4c0
FB
11267 break;
11268 case DISAS_TB_JUMP:
11269 /* nothing more to generate */
11270 break;
9ee6e8bb 11271 case DISAS_WFI:
1ce94f81 11272 gen_helper_wfi(cpu_env);
9ee6e8bb 11273 break;
72c1d3af
PM
11274 case DISAS_WFE:
11275 gen_helper_wfe(cpu_env);
11276 break;
9ee6e8bb 11277 case DISAS_SWI:
d4a2dc67 11278 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11279 break;
37e6456e
PM
11280 case DISAS_HVC:
11281 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11282 break;
11283 case DISAS_SMC:
11284 gen_exception(EXCP_SMC, syn_aa32_smc());
11285 break;
8aaca4c0 11286 }
e50e6a20
FB
11287 if (dc->condjmp) {
11288 gen_set_label(dc->condlabel);
9ee6e8bb 11289 gen_set_condexec(dc);
6e256c93 11290 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11291 dc->condjmp = 0;
11292 }
2c0262af 11293 }
2e70f6ef 11294
9ee6e8bb 11295done_generating:
806f352d 11296 gen_tb_end(tb, num_insns);
efd7f486 11297 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
11298
11299#ifdef DEBUG_DISAS
8fec2b8c 11300 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11301 qemu_log("----------------\n");
11302 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11303 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11304 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11305 qemu_log("\n");
2c0262af
FB
11306 }
11307#endif
b5ff1b31 11308 if (search_pc) {
92414b31 11309 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
11310 lj++;
11311 while (lj <= j)
ab1103de 11312 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11313 } else {
2c0262af 11314 tb->size = dc->pc - pc_start;
2e70f6ef 11315 tb->icount = num_insns;
b5ff1b31 11316 }
2c0262af
FB
11317}
11318
0ecb72a5 11319void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11320{
5639c3f2 11321 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11322}
11323
0ecb72a5 11324void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11325{
5639c3f2 11326 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11327}
11328
b5ff1b31 11329static const char *cpu_mode_names[16] = {
28c9457d
EI
11330 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11331 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11332};
9ee6e8bb 11333
878096ee
AF
11334void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11335 int flags)
2c0262af 11336{
878096ee
AF
11337 ARMCPU *cpu = ARM_CPU(cs);
11338 CPUARMState *env = &cpu->env;
2c0262af 11339 int i;
b5ff1b31 11340 uint32_t psr;
2c0262af 11341
17731115
PM
11342 if (is_a64(env)) {
11343 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11344 return;
11345 }
11346
2c0262af 11347 for(i=0;i<16;i++) {
7fe48483 11348 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11349 if ((i % 4) == 3)
7fe48483 11350 cpu_fprintf(f, "\n");
2c0262af 11351 else
7fe48483 11352 cpu_fprintf(f, " ");
2c0262af 11353 }
b5ff1b31 11354 psr = cpsr_read(env);
687fa640
TS
11355 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11356 psr,
b5ff1b31
FB
11357 psr & (1 << 31) ? 'N' : '-',
11358 psr & (1 << 30) ? 'Z' : '-',
11359 psr & (1 << 29) ? 'C' : '-',
11360 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11361 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11362 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11363
f2617cfc
PM
11364 if (flags & CPU_DUMP_FPU) {
11365 int numvfpregs = 0;
11366 if (arm_feature(env, ARM_FEATURE_VFP)) {
11367 numvfpregs += 16;
11368 }
11369 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11370 numvfpregs += 16;
11371 }
11372 for (i = 0; i < numvfpregs; i++) {
11373 uint64_t v = float64_val(env->vfp.regs[i]);
11374 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11375 i * 2, (uint32_t)v,
11376 i * 2 + 1, (uint32_t)(v >> 32),
11377 i, v);
11378 }
11379 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11380 }
2c0262af 11381}
a6b025d3 11382
0ecb72a5 11383void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11384{
3926cc84
AG
11385 if (is_a64(env)) {
11386 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11387 env->condexec_bits = 0;
3926cc84
AG
11388 } else {
11389 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11390 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11391 }
d2856f1a 11392}
This page took 3.346659 seconds and 4 git commands to generate.