]> Git Repo - qemu.git/blame - target-arm/translate.c
hw/sd/pxa2xx_mmci: Stop using old_mmio in MemoryRegionOps
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
a7e30d84
LV
38#include "trace-tcg.h"
39
40
2b51668f
PM
41#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
42#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 43/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 44#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 45#define ENABLE_ARCH_5J 0
2b51668f
PM
46#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
47#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
48#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
49#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
50#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 51
86753403 52#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 53
f570c61e 54#include "translate.h"
e12ce78d
PM
55static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
56
b5ff1b31
FB
57#if defined(CONFIG_USER_ONLY)
58#define IS_USER(s) 1
59#else
60#define IS_USER(s) (s->user)
61#endif
62
3407ad0e 63TCGv_ptr cpu_env;
ad69471c 64/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 65static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 66static TCGv_i32 cpu_R[16];
66c374de 67static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
68static TCGv_i64 cpu_exclusive_addr;
69static TCGv_i64 cpu_exclusive_val;
426f5abc 70#ifdef CONFIG_USER_ONLY
03d05e2d 71static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
72static TCGv_i32 cpu_exclusive_info;
73#endif
ad69471c 74
b26eefb6 75/* FIXME: These should be removed. */
39d5492a 76static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 77static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 78
022c62cb 79#include "exec/gen-icount.h"
2e70f6ef 80
155c3eac
FN
81static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84
b26eefb6
PB
85/* initialize TCG globals. */
86void arm_translate_init(void)
87{
155c3eac
FN
88 int i;
89
a7812ae4
PB
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
91
155c3eac
FN
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 94 offsetof(CPUARMState, regs[i]),
155c3eac
FN
95 regnames[i]);
96 }
66c374de
AJ
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
101
03d05e2d 102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 106#ifdef CONFIG_USER_ONLY
03d05e2d 107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 111#endif
155c3eac 112
14ade10f 113 a64_translate_init();
b26eefb6
PB
114}
115
579d21cc
PM
116static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
117{
118 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
119 * insns:
120 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
121 * otherwise, access as if at PL0.
122 */
123 switch (s->mmu_idx) {
124 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
125 case ARMMMUIdx_S12NSE0:
126 case ARMMMUIdx_S12NSE1:
127 return ARMMMUIdx_S12NSE0;
128 case ARMMMUIdx_S1E3:
129 case ARMMMUIdx_S1SE0:
130 case ARMMMUIdx_S1SE1:
131 return ARMMMUIdx_S1SE0;
132 case ARMMMUIdx_S2NS:
133 default:
134 g_assert_not_reached();
135 }
136}
137
39d5492a 138static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 139{
39d5492a 140 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
141 tcg_gen_ld_i32(tmp, cpu_env, offset);
142 return tmp;
143}
144
0ecb72a5 145#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 146
39d5492a 147static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
148{
149 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 150 tcg_temp_free_i32(var);
d9ba4830
PB
151}
152
153#define store_cpu_field(var, name) \
0ecb72a5 154 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 155
b26eefb6 156/* Set a variable to the value of a CPU register. */
39d5492a 157static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
158{
159 if (reg == 15) {
160 uint32_t addr;
b90372ad 161 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
162 if (s->thumb)
163 addr = (long)s->pc + 2;
164 else
165 addr = (long)s->pc + 4;
166 tcg_gen_movi_i32(var, addr);
167 } else {
155c3eac 168 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
169 }
170}
171
172/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 173static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 174{
39d5492a 175 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
176 load_reg_var(s, tmp, reg);
177 return tmp;
178}
179
180/* Set a CPU register. The source must be a temporary and will be
181 marked as dead. */
39d5492a 182static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
183{
184 if (reg == 15) {
185 tcg_gen_andi_i32(var, var, ~1);
186 s->is_jmp = DISAS_JUMP;
187 }
155c3eac 188 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 189 tcg_temp_free_i32(var);
b26eefb6
PB
190}
191
/* Value extensions.  In-place zero/sign extension of byte and
 * halfword quantities.
 */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual (per-halfword) byte extensions, via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
b26eefb6 201
39d5492a 202static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 203{
39d5492a 204 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 205 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
206 tcg_temp_free_i32(tmp_mask);
207}
d9ba4830
PB
208/* Set NZCV flags from the high 4 bits of var. */
209#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
210
d4a2dc67 211static void gen_exception_internal(int excp)
d9ba4830 212{
d4a2dc67
PM
213 TCGv_i32 tcg_excp = tcg_const_i32(excp);
214
215 assert(excp_is_internal(excp));
216 gen_helper_exception_internal(cpu_env, tcg_excp);
217 tcg_temp_free_i32(tcg_excp);
218}
219
73710361 220static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
221{
222 TCGv_i32 tcg_excp = tcg_const_i32(excp);
223 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 224 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 225
73710361
GB
226 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
227 tcg_syn, tcg_el);
228
229 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
230 tcg_temp_free_i32(tcg_syn);
231 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
232}
233
50225ad0
PM
234static void gen_ss_advance(DisasContext *s)
235{
236 /* If the singlestep state is Active-not-pending, advance to
237 * Active-pending.
238 */
239 if (s->ss_active) {
240 s->pstate_ss = 0;
241 gen_helper_clear_pstate_ss(cpu_env);
242 }
243}
244
245static void gen_step_complete_exception(DisasContext *s)
246{
247 /* We just completed step of an insn. Move from Active-not-pending
248 * to Active-pending, and then also take the swstep exception.
249 * This corresponds to making the (IMPDEF) choice to prioritize
250 * swstep exceptions over asynchronous exceptions taken to an exception
251 * level where debug is disabled. This choice has the advantage that
252 * we do not need to maintain internal state corresponding to the
253 * ISV/EX syndrome bits between completion of the step and generation
254 * of the exception, and our syndrome information is always correct.
255 */
256 gen_ss_advance(s);
73710361
GB
257 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
258 default_exception_el(s));
50225ad0
PM
259 s->is_jmp = DISAS_EXC;
260}
261
39d5492a 262static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 263{
39d5492a
PM
264 TCGv_i32 tmp1 = tcg_temp_new_i32();
265 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
266 tcg_gen_ext16s_i32(tmp1, a);
267 tcg_gen_ext16s_i32(tmp2, b);
3670669c 268 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 269 tcg_temp_free_i32(tmp2);
3670669c
PB
270 tcg_gen_sari_i32(a, a, 16);
271 tcg_gen_sari_i32(b, b, 16);
272 tcg_gen_mul_i32(b, b, a);
273 tcg_gen_mov_i32(a, tmp1);
7d1b0095 274 tcg_temp_free_i32(tmp1);
3670669c
PB
275}
276
277/* Byteswap each halfword. */
39d5492a 278static void gen_rev16(TCGv_i32 var)
3670669c 279{
39d5492a 280 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
281 tcg_gen_shri_i32(tmp, var, 8);
282 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
283 tcg_gen_shli_i32(var, var, 8);
284 tcg_gen_andi_i32(var, var, 0xff00ff00);
285 tcg_gen_or_i32(var, var, tmp);
7d1b0095 286 tcg_temp_free_i32(tmp);
3670669c
PB
287}
288
289/* Byteswap low halfword and sign extend. */
39d5492a 290static void gen_revsh(TCGv_i32 var)
3670669c 291{
1a855029
AJ
292 tcg_gen_ext16u_i32(var, var);
293 tcg_gen_bswap16_i32(var, var);
294 tcg_gen_ext16s_i32(var, var);
3670669c
PB
295}
296
297/* Unsigned bitfield extract. */
39d5492a 298static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
299{
300 if (shift)
301 tcg_gen_shri_i32(var, var, shift);
302 tcg_gen_andi_i32(var, var, mask);
303}
304
305/* Signed bitfield extract. */
39d5492a 306static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
307{
308 uint32_t signbit;
309
310 if (shift)
311 tcg_gen_sari_i32(var, var, shift);
312 if (shift + width < 32) {
313 signbit = 1u << (width - 1);
314 tcg_gen_andi_i32(var, var, (1u << width) - 1);
315 tcg_gen_xori_i32(var, var, signbit);
316 tcg_gen_subi_i32(var, var, signbit);
317 }
318}
319
838fa72d 320/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 321static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 322{
838fa72d
AJ
323 TCGv_i64 tmp64 = tcg_temp_new_i64();
324
325 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 326 tcg_temp_free_i32(b);
838fa72d
AJ
327 tcg_gen_shli_i64(tmp64, tmp64, 32);
328 tcg_gen_add_i64(a, tmp64, a);
329
330 tcg_temp_free_i64(tmp64);
331 return a;
332}
333
334/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 335static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
336{
337 TCGv_i64 tmp64 = tcg_temp_new_i64();
338
339 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 340 tcg_temp_free_i32(b);
838fa72d
AJ
341 tcg_gen_shli_i64(tmp64, tmp64, 32);
342 tcg_gen_sub_i64(a, tmp64, a);
343
344 tcg_temp_free_i64(tmp64);
345 return a;
3670669c
PB
346}
347
5e3f878a 348/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 349static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 350{
39d5492a
PM
351 TCGv_i32 lo = tcg_temp_new_i32();
352 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 353 TCGv_i64 ret;
5e3f878a 354
831d7fe8 355 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 356 tcg_temp_free_i32(a);
7d1b0095 357 tcg_temp_free_i32(b);
831d7fe8
RH
358
359 ret = tcg_temp_new_i64();
360 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
361 tcg_temp_free_i32(lo);
362 tcg_temp_free_i32(hi);
831d7fe8
RH
363
364 return ret;
5e3f878a
PB
365}
366
39d5492a 367static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 368{
39d5492a
PM
369 TCGv_i32 lo = tcg_temp_new_i32();
370 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 371 TCGv_i64 ret;
5e3f878a 372
831d7fe8 373 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 374 tcg_temp_free_i32(a);
7d1b0095 375 tcg_temp_free_i32(b);
831d7fe8
RH
376
377 ret = tcg_temp_new_i64();
378 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
379 tcg_temp_free_i32(lo);
380 tcg_temp_free_i32(hi);
831d7fe8
RH
381
382 return ret;
5e3f878a
PB
383}
384
8f01245e 385/* Swap low and high halfwords. */
39d5492a 386static void gen_swap_half(TCGv_i32 var)
8f01245e 387{
39d5492a 388 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
389 tcg_gen_shri_i32(tmp, var, 16);
390 tcg_gen_shli_i32(var, var, 16);
391 tcg_gen_or_i32(var, var, tmp);
7d1b0095 392 tcg_temp_free_i32(tmp);
8f01245e
PB
393}
394
b26eefb6
PB
395/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
396 tmp = (t0 ^ t1) & 0x8000;
397 t0 &= ~0x8000;
398 t1 &= ~0x8000;
399 t0 = (t0 + t1) ^ tmp;
400 */
401
39d5492a 402static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 403{
39d5492a 404 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
405 tcg_gen_xor_i32(tmp, t0, t1);
406 tcg_gen_andi_i32(tmp, tmp, 0x8000);
407 tcg_gen_andi_i32(t0, t0, ~0x8000);
408 tcg_gen_andi_i32(t1, t1, ~0x8000);
409 tcg_gen_add_i32(t0, t0, t1);
410 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
411 tcg_temp_free_i32(tmp);
412 tcg_temp_free_i32(t1);
b26eefb6
PB
413}
414
415/* Set CF to the top bit of var. */
39d5492a 416static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 417{
66c374de 418 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
419}
420
421/* Set N and Z flags from var. */
39d5492a 422static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 423{
66c374de
AJ
424 tcg_gen_mov_i32(cpu_NF, var);
425 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
426}
427
428/* T0 += T1 + CF. */
39d5492a 429static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 430{
396e467c 431 tcg_gen_add_i32(t0, t0, t1);
66c374de 432 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
433}
434
e9bb4aa9 435/* dest = T0 + T1 + CF. */
39d5492a 436static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 437{
e9bb4aa9 438 tcg_gen_add_i32(dest, t0, t1);
66c374de 439 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
440}
441
3670669c 442/* dest = T0 - T1 + CF - 1. */
39d5492a 443static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 444{
3670669c 445 tcg_gen_sub_i32(dest, t0, t1);
66c374de 446 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 447 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
448}
449
72485ec4 450/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 451static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 452{
39d5492a 453 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
454 tcg_gen_movi_i32(tmp, 0);
455 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 456 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 457 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
458 tcg_gen_xor_i32(tmp, t0, t1);
459 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
460 tcg_temp_free_i32(tmp);
461 tcg_gen_mov_i32(dest, cpu_NF);
462}
463
49b4c31e 464/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 465static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 466{
39d5492a 467 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
468 if (TCG_TARGET_HAS_add2_i32) {
469 tcg_gen_movi_i32(tmp, 0);
470 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 471 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
472 } else {
473 TCGv_i64 q0 = tcg_temp_new_i64();
474 TCGv_i64 q1 = tcg_temp_new_i64();
475 tcg_gen_extu_i32_i64(q0, t0);
476 tcg_gen_extu_i32_i64(q1, t1);
477 tcg_gen_add_i64(q0, q0, q1);
478 tcg_gen_extu_i32_i64(q1, cpu_CF);
479 tcg_gen_add_i64(q0, q0, q1);
480 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
481 tcg_temp_free_i64(q0);
482 tcg_temp_free_i64(q1);
483 }
484 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
485 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
486 tcg_gen_xor_i32(tmp, t0, t1);
487 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
488 tcg_temp_free_i32(tmp);
489 tcg_gen_mov_i32(dest, cpu_NF);
490}
491
72485ec4 492/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 493static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 494{
39d5492a 495 TCGv_i32 tmp;
72485ec4
AJ
496 tcg_gen_sub_i32(cpu_NF, t0, t1);
497 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
498 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
499 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
500 tmp = tcg_temp_new_i32();
501 tcg_gen_xor_i32(tmp, t0, t1);
502 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
503 tcg_temp_free_i32(tmp);
504 tcg_gen_mov_i32(dest, cpu_NF);
505}
506
e77f0832 507/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 508static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 509{
39d5492a 510 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
511 tcg_gen_not_i32(tmp, t1);
512 gen_adc_CC(dest, t0, tmp);
39d5492a 513 tcg_temp_free_i32(tmp);
2de68a49
RH
514}
515
365af80e 516#define GEN_SHIFT(name) \
39d5492a 517static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 518{ \
39d5492a 519 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
520 tmp1 = tcg_temp_new_i32(); \
521 tcg_gen_andi_i32(tmp1, t1, 0xff); \
522 tmp2 = tcg_const_i32(0); \
523 tmp3 = tcg_const_i32(0x1f); \
524 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
525 tcg_temp_free_i32(tmp3); \
526 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
527 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
528 tcg_temp_free_i32(tmp2); \
529 tcg_temp_free_i32(tmp1); \
530}
531GEN_SHIFT(shl)
532GEN_SHIFT(shr)
533#undef GEN_SHIFT
534
39d5492a 535static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 536{
39d5492a 537 TCGv_i32 tmp1, tmp2;
365af80e
AJ
538 tmp1 = tcg_temp_new_i32();
539 tcg_gen_andi_i32(tmp1, t1, 0xff);
540 tmp2 = tcg_const_i32(0x1f);
541 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
542 tcg_temp_free_i32(tmp2);
543 tcg_gen_sar_i32(dest, t0, tmp1);
544 tcg_temp_free_i32(tmp1);
545}
546
39d5492a 547static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 548{
39d5492a
PM
549 TCGv_i32 c0 = tcg_const_i32(0);
550 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
551 tcg_gen_neg_i32(tmp, src);
552 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
553 tcg_temp_free_i32(c0);
554 tcg_temp_free_i32(tmp);
555}
ad69471c 556
39d5492a 557static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 558{
9a119ff6 559 if (shift == 0) {
66c374de 560 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 561 } else {
66c374de
AJ
562 tcg_gen_shri_i32(cpu_CF, var, shift);
563 if (shift != 31) {
564 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
565 }
9a119ff6 566 }
9a119ff6 567}
b26eefb6 568
9a119ff6 569/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
570static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
571 int shift, int flags)
9a119ff6
PB
572{
573 switch (shiftop) {
574 case 0: /* LSL */
575 if (shift != 0) {
576 if (flags)
577 shifter_out_im(var, 32 - shift);
578 tcg_gen_shli_i32(var, var, shift);
579 }
580 break;
581 case 1: /* LSR */
582 if (shift == 0) {
583 if (flags) {
66c374de 584 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
585 }
586 tcg_gen_movi_i32(var, 0);
587 } else {
588 if (flags)
589 shifter_out_im(var, shift - 1);
590 tcg_gen_shri_i32(var, var, shift);
591 }
592 break;
593 case 2: /* ASR */
594 if (shift == 0)
595 shift = 32;
596 if (flags)
597 shifter_out_im(var, shift - 1);
598 if (shift == 32)
599 shift = 31;
600 tcg_gen_sari_i32(var, var, shift);
601 break;
602 case 3: /* ROR/RRX */
603 if (shift != 0) {
604 if (flags)
605 shifter_out_im(var, shift - 1);
f669df27 606 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 607 } else {
39d5492a 608 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 609 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
610 if (flags)
611 shifter_out_im(var, 0);
612 tcg_gen_shri_i32(var, var, 1);
b26eefb6 613 tcg_gen_or_i32(var, var, tmp);
7d1b0095 614 tcg_temp_free_i32(tmp);
b26eefb6
PB
615 }
616 }
617};
618
39d5492a
PM
619static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
620 TCGv_i32 shift, int flags)
8984bd2e
PB
621{
622 if (flags) {
623 switch (shiftop) {
9ef39277
BS
624 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
625 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
626 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
627 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
628 }
629 } else {
630 switch (shiftop) {
365af80e
AJ
631 case 0:
632 gen_shl(var, var, shift);
633 break;
634 case 1:
635 gen_shr(var, var, shift);
636 break;
637 case 2:
638 gen_sar(var, var, shift);
639 break;
f669df27
AJ
640 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
641 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
642 }
643 }
7d1b0095 644 tcg_temp_free_i32(shift);
8984bd2e
PB
645}
646
6ddbc6e4
PB
647#define PAS_OP(pfx) \
648 switch (op2) { \
649 case 0: gen_pas_helper(glue(pfx,add16)); break; \
650 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
651 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
652 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
653 case 4: gen_pas_helper(glue(pfx,add8)); break; \
654 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
655 }
39d5492a 656static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 657{
a7812ae4 658 TCGv_ptr tmp;
6ddbc6e4
PB
659
660 switch (op1) {
661#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
662 case 1:
a7812ae4 663 tmp = tcg_temp_new_ptr();
0ecb72a5 664 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 665 PAS_OP(s)
b75263d6 666 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
667 break;
668 case 5:
a7812ae4 669 tmp = tcg_temp_new_ptr();
0ecb72a5 670 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 671 PAS_OP(u)
b75263d6 672 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
673 break;
674#undef gen_pas_helper
675#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
676 case 2:
677 PAS_OP(q);
678 break;
679 case 3:
680 PAS_OP(sh);
681 break;
682 case 6:
683 PAS_OP(uq);
684 break;
685 case 7:
686 PAS_OP(uh);
687 break;
688#undef gen_pas_helper
689 }
690}
9ee6e8bb
PB
691#undef PAS_OP
692
6ddbc6e4
PB
693/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
694#define PAS_OP(pfx) \
ed89a2f1 695 switch (op1) { \
6ddbc6e4
PB
696 case 0: gen_pas_helper(glue(pfx,add8)); break; \
697 case 1: gen_pas_helper(glue(pfx,add16)); break; \
698 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
699 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
700 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
701 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
702 }
39d5492a 703static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 704{
a7812ae4 705 TCGv_ptr tmp;
6ddbc6e4 706
ed89a2f1 707 switch (op2) {
6ddbc6e4
PB
708#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
709 case 0:
a7812ae4 710 tmp = tcg_temp_new_ptr();
0ecb72a5 711 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 712 PAS_OP(s)
b75263d6 713 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
714 break;
715 case 4:
a7812ae4 716 tmp = tcg_temp_new_ptr();
0ecb72a5 717 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 718 PAS_OP(u)
b75263d6 719 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
720 break;
721#undef gen_pas_helper
722#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
723 case 1:
724 PAS_OP(q);
725 break;
726 case 2:
727 PAS_OP(sh);
728 break;
729 case 5:
730 PAS_OP(uq);
731 break;
732 case 6:
733 PAS_OP(uh);
734 break;
735#undef gen_pas_helper
736 }
737}
9ee6e8bb
PB
738#undef PAS_OP
739
39fb730a
AG
740/*
741 * generate a conditional branch based on ARM condition code cc.
742 * This is common between ARM and Aarch64 targets.
743 */
42a268c2 744void arm_gen_test_cc(int cc, TCGLabel *label)
d9ba4830 745{
39d5492a 746 TCGv_i32 tmp;
42a268c2 747 TCGLabel *inv;
d9ba4830 748
d9ba4830
PB
749 switch (cc) {
750 case 0: /* eq: Z */
66c374de 751 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
752 break;
753 case 1: /* ne: !Z */
66c374de 754 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
755 break;
756 case 2: /* cs: C */
66c374de 757 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
758 break;
759 case 3: /* cc: !C */
66c374de 760 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
761 break;
762 case 4: /* mi: N */
66c374de 763 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
764 break;
765 case 5: /* pl: !N */
66c374de 766 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
767 break;
768 case 6: /* vs: V */
66c374de 769 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
770 break;
771 case 7: /* vc: !V */
66c374de 772 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
773 break;
774 case 8: /* hi: C && !Z */
775 inv = gen_new_label();
66c374de
AJ
776 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
777 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
778 gen_set_label(inv);
779 break;
780 case 9: /* ls: !C || Z */
66c374de
AJ
781 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
782 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
783 break;
784 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
785 tmp = tcg_temp_new_i32();
786 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 787 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 788 tcg_temp_free_i32(tmp);
d9ba4830
PB
789 break;
790 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
791 tmp = tcg_temp_new_i32();
792 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 793 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 794 tcg_temp_free_i32(tmp);
d9ba4830
PB
795 break;
796 case 12: /* gt: !Z && N == V */
797 inv = gen_new_label();
66c374de
AJ
798 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
799 tmp = tcg_temp_new_i32();
800 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 801 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 802 tcg_temp_free_i32(tmp);
d9ba4830
PB
803 gen_set_label(inv);
804 break;
805 case 13: /* le: Z || N != V */
66c374de
AJ
806 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
807 tmp = tcg_temp_new_i32();
808 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 809 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 810 tcg_temp_free_i32(tmp);
d9ba4830
PB
811 break;
812 default:
813 fprintf(stderr, "Bad condition code 0x%x\n", cc);
814 abort();
815 }
d9ba4830 816}
2c0262af 817
b1d8e52e 818static const uint8_t table_logic_cc[16] = {
2c0262af
FB
819 1, /* and */
820 1, /* xor */
821 0, /* sub */
822 0, /* rsb */
823 0, /* add */
824 0, /* adc */
825 0, /* sbc */
826 0, /* rsc */
827 1, /* andl */
828 1, /* xorl */
829 0, /* cmp */
830 0, /* cmn */
831 1, /* orr */
832 1, /* mov */
833 1, /* bic */
834 1, /* mvn */
835};
3b46e624 836
d9ba4830
PB
837/* Set PC and Thumb state from an immediate address. */
838static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 839{
39d5492a 840 TCGv_i32 tmp;
99c475ab 841
b26eefb6 842 s->is_jmp = DISAS_UPDATE;
d9ba4830 843 if (s->thumb != (addr & 1)) {
7d1b0095 844 tmp = tcg_temp_new_i32();
d9ba4830 845 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 846 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 847 tcg_temp_free_i32(tmp);
d9ba4830 848 }
155c3eac 849 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
850}
851
852/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 853static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 854{
d9ba4830 855 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
856 tcg_gen_andi_i32(cpu_R[15], var, ~1);
857 tcg_gen_andi_i32(var, var, 1);
858 store_cpu_field(var, thumb);
d9ba4830
PB
859}
860
21aeb343
JR
861/* Variant of store_reg which uses branch&exchange logic when storing
862 to r15 in ARM architecture v7 and above. The source must be a temporary
863 and will be marked as dead. */
7dcc1f89 864static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
865{
866 if (reg == 15 && ENABLE_ARCH_7) {
867 gen_bx(s, var);
868 } else {
869 store_reg(s, reg, var);
870 }
871}
872
be5e7a76
DES
873/* Variant of store_reg which uses branch&exchange logic when storing
874 * to r15 in ARM architecture v5T and above. This is used for storing
875 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
876 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 877static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
878{
879 if (reg == 15 && ENABLE_ARCH_5) {
880 gen_bx(s, var);
881 } else {
882 store_reg(s, reg, var);
883 }
884}
885
08307563
PM
886/* Abstractions of "generate code to do a guest load/store for
887 * AArch32", where a vaddr is always 32 bits (and is zero
888 * extended if we're a 64 bit core) and data is also
889 * 32 bits unless specifically doing a 64 bit access.
890 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 891 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
892 */
/* TARGET_LONG_BITS is the width of a TCG virtual address.  When it is
 * 32 bits the guest 32-bit address can be used directly; when it is
 * 64 bits the guest address must be zero-extended first.
 */
#if TARGET_LONG_BITS == 32

/* Emit a 32-bit (or narrower, per OPC) guest load into VAL. */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
}

/* Emit a 32-bit (or narrower, per OPC) guest store of VAL. */
#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    tcg_gen_qemu_st_i32(val, addr, index, OPC); \
}

/* 64-bit guest load in target endianness (MO_TEQ). */
static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
}

/* 64-bit guest store in target endianness (MO_TEQ). */
static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
}

#else

/* 64-bit TCG addresses: zero-extend the 32-bit guest address into a
 * scratch TCGv before performing the access.
 */
#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
{ \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
    tcg_temp_free(addr64); \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

#endif

/* Instantiate the signed/unsigned byte, halfword and word accessors. */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
/* Emit code to set the guest PC (r15) to the immediate VAL. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
/* Generate code for the HVC (hypervisor call) instruction.
 * imm16 is the 16-bit immediate from the instruction encoding; it is
 * stashed in s->svc_imm for the exception path.
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
/* Generate code for the SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
d4a2dc67
PM
1002static inline void
1003gen_set_condexec (DisasContext *s)
1004{
1005 if (s->condexec_mask) {
1006 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1007 TCGv_i32 tmp = tcg_temp_new_i32();
1008 tcg_gen_movi_i32(tmp, val);
1009 store_cpu_field(tmp, condexec_bits);
1010 }
1011}
1012
/* Raise a QEMU-internal exception EXCP, winding the PC back by
 * `offset` bytes so it points at the current instruction.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
/* Raise an architectural exception EXCP with syndrome `syn`, targeting
 * exception level `target_el`; the PC is wound back by `offset` bytes.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 of the stored PC is masked off (presumably because it would
     * be the Thumb bit, which is tracked elsewhere -- confirm). */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
b0109805 1037static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1038 TCGv_i32 var)
2c0262af 1039{
1e8d4eec 1040 int val, rm, shift, shiftop;
39d5492a 1041 TCGv_i32 offset;
2c0262af
FB
1042
1043 if (!(insn & (1 << 25))) {
1044 /* immediate */
1045 val = insn & 0xfff;
1046 if (!(insn & (1 << 23)))
1047 val = -val;
537730b9 1048 if (val != 0)
b0109805 1049 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1050 } else {
1051 /* shift/register */
1052 rm = (insn) & 0xf;
1053 shift = (insn >> 7) & 0x1f;
1e8d4eec 1054 shiftop = (insn >> 5) & 3;
b26eefb6 1055 offset = load_reg(s, rm);
9a119ff6 1056 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1057 if (!(insn & (1 << 23)))
b0109805 1058 tcg_gen_sub_i32(var, var, offset);
2c0262af 1059 else
b0109805 1060 tcg_gen_add_i32(var, var, offset);
7d1b0095 1061 tcg_temp_free_i32(offset);
2c0262af
FB
1062 }
1063}
1064
191f9a93 1065static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1066 int extra, TCGv_i32 var)
2c0262af
FB
1067{
1068 int val, rm;
39d5492a 1069 TCGv_i32 offset;
3b46e624 1070
2c0262af
FB
1071 if (insn & (1 << 22)) {
1072 /* immediate */
1073 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1074 if (!(insn & (1 << 23)))
1075 val = -val;
18acad92 1076 val += extra;
537730b9 1077 if (val != 0)
b0109805 1078 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1079 } else {
1080 /* register */
191f9a93 1081 if (extra)
b0109805 1082 tcg_gen_addi_i32(var, var, extra);
2c0262af 1083 rm = (insn) & 0xf;
b26eefb6 1084 offset = load_reg(s, rm);
2c0262af 1085 if (!(insn & (1 << 23)))
b0109805 1086 tcg_gen_sub_i32(var, var, offset);
2c0262af 1087 else
b0109805 1088 tcg_gen_add_i32(var, var, offset);
7d1b0095 1089 tcg_temp_free_i32(offset);
2c0262af
FB
1090 }
1091}
1092
5aaebd13
PM
1093static TCGv_ptr get_fpstatus_ptr(int neon)
1094{
1095 TCGv_ptr statusptr = tcg_temp_new_ptr();
1096 int offset;
1097 if (neon) {
0ecb72a5 1098 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1099 } else {
0ecb72a5 1100 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1101 }
1102 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1103 return statusptr;
1104}
1105
/* Two-operand VFP arithmetic: F0 := F0 <op> F1, using the plain VFP
 * float_status (get_fpstatus_ptr(0)).  dp selects double (1) vs
 * single (0) precision.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
/* F1 := F0 * F1 using the VFP float_status. */
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
/* F1 := -F0 (negation helpers take no float_status). */
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
4373f3ce
PB
1147static inline void gen_vfp_abs(int dp)
1148{
1149 if (dp)
1150 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1151 else
1152 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1153}
1154
1155static inline void gen_vfp_neg(int dp)
1156{
1157 if (dp)
1158 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1159 else
1160 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1161}
1162
1163static inline void gen_vfp_sqrt(int dp)
1164{
1165 if (dp)
1166 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1167 else
1168 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1169}
1170
1171static inline void gen_vfp_cmp(int dp)
1172{
1173 if (dp)
1174 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1175 else
1176 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1177}
1178
1179static inline void gen_vfp_cmpe(int dp)
1180{
1181 if (dp)
1182 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1183 else
1184 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1185}
1186
1187static inline void gen_vfp_F1_ld0(int dp)
1188{
1189 if (dp)
5b340b51 1190 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1191 else
5b340b51 1192 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1193}
1194
/* Integer-to-float conversions.  The integer source is always the
 * 32-bit cpu_F0s, even when producing a double-precision result.
 * `neon` selects which float_status is used (see get_fpstatus_ptr).
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
/* Float-to-integer conversions.  The integer result is always written
 * to the 32-bit cpu_F0s, even for a double-precision source.
 * `neon` selects which float_status is used.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
/* Conversions between floats and fixed-point values.  `shift` is the
 * number of fraction bits, passed to the helper as an immediate temp.
 * `round` is an extra helper-name suffix selecting the rounding
 * behavior (empty, or _round_to_zero for the float-to-fixed forms).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
39d5492a 1254static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1255{
08307563 1256 if (dp) {
6ce2faf4 1257 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
08307563 1258 } else {
6ce2faf4 1259 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
08307563 1260 }
b5ff1b31
FB
1261}
1262
39d5492a 1263static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1264{
08307563 1265 if (dp) {
6ce2faf4 1266 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
08307563 1267 } else {
6ce2faf4 1268 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
08307563 1269 }
b5ff1b31
FB
1270}
1271
8e96005d
FB
1272static inline long
1273vfp_reg_offset (int dp, int reg)
1274{
1275 if (dp)
1276 return offsetof(CPUARMState, vfp.regs[reg]);
1277 else if (reg & 1) {
1278 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1279 + offsetof(CPU_DoubleU, l.upper);
1280 } else {
1281 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1282 + offsetof(CPU_DoubleU, l.lower);
1283 }
1284}
9ee6e8bb
PB
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
/* Load one 32-bit piece of a NEON register into a fresh temp.
 * The caller owns (and must free) the returned temp.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store VAR into one 32-bit piece of a NEON register.
 * Consumes VAR (frees the temp).
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
/* Load/store a full 64-bit NEON (D) register to/from VAR. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Float loads/stores are bit-identical to the integer ops of the same
 * width, so just alias them.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1324static inline void gen_mov_F0_vreg(int dp, int reg)
1325{
1326 if (dp)
4373f3ce 1327 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1328 else
4373f3ce 1329 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1330}
1331
1332static inline void gen_mov_F1_vreg(int dp, int reg)
1333{
1334 if (dp)
4373f3ce 1335 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1336 else
4373f3ce 1337 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1338}
1339
1340static inline void gen_mov_vreg_F0(int dp, int reg)
1341{
1342 if (dp)
4373f3ce 1343 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1344 else
4373f3ce 1345 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1346}
1347
/* Coprocessor insn bit 20: set for the "read" (load/MRC) direction. */
#define ARM_CP_RW_BIT (1 << 20)

/* Copy a 64-bit iwMMXt data register wRn to/from a TCGv_i64. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load a 32-bit iwMMXt control register into a fresh temp.
 * The caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store VAR into an iwMMXt control register; consumes (frees) VAR. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
/* Copy between the M0 scratch register and iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* Bitwise accumulate into M0: M0 := M0 <op> wRn (via the V1 scratch). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Generate M0 := helper(M0, wRn) for a plain iwMMXt helper. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env helper. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env helper: M0 := helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
/* Multiply/accumulate and sum-of-absolute-difference ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Pack/unpack ops (env helpers: they update saturation state). */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Elementwise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Elementwise min/max. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Elementwise add/subtract variants. */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averages. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

/* Packing with saturation. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d
PB
1481static void gen_op_iwmmxt_set_mup(void)
1482{
39d5492a 1483 TCGv_i32 tmp;
e677137d
PB
1484 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1485 tcg_gen_ori_i32(tmp, tmp, 2);
1486 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1487}
1488
1489static void gen_op_iwmmxt_set_cup(void)
1490{
39d5492a 1491 TCGv_i32 tmp;
e677137d
PB
1492 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1493 tcg_gen_ori_i32(tmp, tmp, 1);
1494 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1495}
1496
1497static void gen_op_iwmmxt_setpsr_nz(void)
1498{
39d5492a 1499 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1500 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1501 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1502}
1503
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
39d5492a
PM
1511static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1512 TCGv_i32 dest)
18c9b560
AZ
1513{
1514 int rd;
1515 uint32_t offset;
39d5492a 1516 TCGv_i32 tmp;
18c9b560
AZ
1517
1518 rd = (insn >> 16) & 0xf;
da6b5335 1519 tmp = load_reg(s, rd);
18c9b560
AZ
1520
1521 offset = (insn & 0xff) << ((insn >> 7) & 2);
1522 if (insn & (1 << 24)) {
1523 /* Pre indexed */
1524 if (insn & (1 << 23))
da6b5335 1525 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1526 else
da6b5335
FN
1527 tcg_gen_addi_i32(tmp, tmp, -offset);
1528 tcg_gen_mov_i32(dest, tmp);
18c9b560 1529 if (insn & (1 << 21))
da6b5335
FN
1530 store_reg(s, rd, tmp);
1531 else
7d1b0095 1532 tcg_temp_free_i32(tmp);
18c9b560
AZ
1533 } else if (insn & (1 << 21)) {
1534 /* Post indexed */
da6b5335 1535 tcg_gen_mov_i32(dest, tmp);
18c9b560 1536 if (insn & (1 << 23))
da6b5335 1537 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1538 else
da6b5335
FN
1539 tcg_gen_addi_i32(tmp, tmp, -offset);
1540 store_reg(s, rd, tmp);
18c9b560
AZ
1541 } else if (!(insn & (1 << 23)))
1542 return 1;
1543 return 0;
1544}
1545
39d5492a 1546static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1547{
1548 int rd = (insn >> 0) & 0xf;
39d5492a 1549 TCGv_i32 tmp;
18c9b560 1550
da6b5335
FN
1551 if (insn & (1 << 8)) {
1552 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1553 return 1;
da6b5335
FN
1554 } else {
1555 tmp = iwmmxt_load_creg(rd);
1556 }
1557 } else {
7d1b0095 1558 tmp = tcg_temp_new_i32();
da6b5335
FN
1559 iwmmxt_load_reg(cpu_V0, rd);
1560 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1561 }
1562 tcg_gen_andi_i32(tmp, tmp, mask);
1563 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1564 tcg_temp_free_i32(tmp);
18c9b560
AZ
1565 return 0;
1566}
1567
a1c7273b 1568/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1569 (ie. an undefined instruction). */
7dcc1f89 1570static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1571{
1572 int rd, wrd;
1573 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1574 TCGv_i32 addr;
1575 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1576
1577 if ((insn & 0x0e000e00) == 0x0c000000) {
1578 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1579 wrd = insn & 0xf;
1580 rdlo = (insn >> 12) & 0xf;
1581 rdhi = (insn >> 16) & 0xf;
1582 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1583 iwmmxt_load_reg(cpu_V0, wrd);
1584 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1585 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1586 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1587 } else { /* TMCRR */
da6b5335
FN
1588 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1589 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1590 gen_op_iwmmxt_set_mup();
1591 }
1592 return 0;
1593 }
1594
1595 wrd = (insn >> 12) & 0xf;
7d1b0095 1596 addr = tcg_temp_new_i32();
da6b5335 1597 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1598 tcg_temp_free_i32(addr);
18c9b560 1599 return 1;
da6b5335 1600 }
18c9b560
AZ
1601 if (insn & ARM_CP_RW_BIT) {
1602 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1603 tmp = tcg_temp_new_i32();
6ce2faf4 1604 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1605 iwmmxt_store_creg(wrd, tmp);
18c9b560 1606 } else {
e677137d
PB
1607 i = 1;
1608 if (insn & (1 << 8)) {
1609 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1610 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1611 i = 0;
1612 } else { /* WLDRW wRd */
29531141 1613 tmp = tcg_temp_new_i32();
6ce2faf4 1614 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1615 }
1616 } else {
29531141 1617 tmp = tcg_temp_new_i32();
e677137d 1618 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1619 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1620 } else { /* WLDRB */
6ce2faf4 1621 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1622 }
1623 }
1624 if (i) {
1625 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1626 tcg_temp_free_i32(tmp);
e677137d 1627 }
18c9b560
AZ
1628 gen_op_iwmmxt_movq_wRn_M0(wrd);
1629 }
1630 } else {
1631 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1632 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1633 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1634 } else {
1635 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1636 tmp = tcg_temp_new_i32();
e677137d
PB
1637 if (insn & (1 << 8)) {
1638 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1639 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1640 } else { /* WSTRW wRd */
1641 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1642 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1643 }
1644 } else {
1645 if (insn & (1 << 22)) { /* WSTRH */
1646 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1647 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1648 } else { /* WSTRB */
1649 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1650 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1651 }
1652 }
18c9b560 1653 }
29531141 1654 tcg_temp_free_i32(tmp);
18c9b560 1655 }
7d1b0095 1656 tcg_temp_free_i32(addr);
18c9b560
AZ
1657 return 0;
1658 }
1659
1660 if ((insn & 0x0f000000) != 0x0e000000)
1661 return 1;
1662
1663 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1664 case 0x000: /* WOR */
1665 wrd = (insn >> 12) & 0xf;
1666 rd0 = (insn >> 0) & 0xf;
1667 rd1 = (insn >> 16) & 0xf;
1668 gen_op_iwmmxt_movq_M0_wRn(rd0);
1669 gen_op_iwmmxt_orq_M0_wRn(rd1);
1670 gen_op_iwmmxt_setpsr_nz();
1671 gen_op_iwmmxt_movq_wRn_M0(wrd);
1672 gen_op_iwmmxt_set_mup();
1673 gen_op_iwmmxt_set_cup();
1674 break;
1675 case 0x011: /* TMCR */
1676 if (insn & 0xf)
1677 return 1;
1678 rd = (insn >> 12) & 0xf;
1679 wrd = (insn >> 16) & 0xf;
1680 switch (wrd) {
1681 case ARM_IWMMXT_wCID:
1682 case ARM_IWMMXT_wCASF:
1683 break;
1684 case ARM_IWMMXT_wCon:
1685 gen_op_iwmmxt_set_cup();
1686 /* Fall through. */
1687 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1688 tmp = iwmmxt_load_creg(wrd);
1689 tmp2 = load_reg(s, rd);
f669df27 1690 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1691 tcg_temp_free_i32(tmp2);
da6b5335 1692 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1693 break;
1694 case ARM_IWMMXT_wCGR0:
1695 case ARM_IWMMXT_wCGR1:
1696 case ARM_IWMMXT_wCGR2:
1697 case ARM_IWMMXT_wCGR3:
1698 gen_op_iwmmxt_set_cup();
da6b5335
FN
1699 tmp = load_reg(s, rd);
1700 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1701 break;
1702 default:
1703 return 1;
1704 }
1705 break;
1706 case 0x100: /* WXOR */
1707 wrd = (insn >> 12) & 0xf;
1708 rd0 = (insn >> 0) & 0xf;
1709 rd1 = (insn >> 16) & 0xf;
1710 gen_op_iwmmxt_movq_M0_wRn(rd0);
1711 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1712 gen_op_iwmmxt_setpsr_nz();
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x111: /* TMRC */
1718 if (insn & 0xf)
1719 return 1;
1720 rd = (insn >> 12) & 0xf;
1721 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1722 tmp = iwmmxt_load_creg(wrd);
1723 store_reg(s, rd, tmp);
18c9b560
AZ
1724 break;
1725 case 0x300: /* WANDN */
1726 wrd = (insn >> 12) & 0xf;
1727 rd0 = (insn >> 0) & 0xf;
1728 rd1 = (insn >> 16) & 0xf;
1729 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1730 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1731 gen_op_iwmmxt_andq_M0_wRn(rd1);
1732 gen_op_iwmmxt_setpsr_nz();
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 gen_op_iwmmxt_set_cup();
1736 break;
1737 case 0x200: /* WAND */
1738 wrd = (insn >> 12) & 0xf;
1739 rd0 = (insn >> 0) & 0xf;
1740 rd1 = (insn >> 16) & 0xf;
1741 gen_op_iwmmxt_movq_M0_wRn(rd0);
1742 gen_op_iwmmxt_andq_M0_wRn(rd1);
1743 gen_op_iwmmxt_setpsr_nz();
1744 gen_op_iwmmxt_movq_wRn_M0(wrd);
1745 gen_op_iwmmxt_set_mup();
1746 gen_op_iwmmxt_set_cup();
1747 break;
1748 case 0x810: case 0xa10: /* WMADD */
1749 wrd = (insn >> 12) & 0xf;
1750 rd0 = (insn >> 0) & 0xf;
1751 rd1 = (insn >> 16) & 0xf;
1752 gen_op_iwmmxt_movq_M0_wRn(rd0);
1753 if (insn & (1 << 21))
1754 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1755 else
1756 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 break;
1760 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1761 wrd = (insn >> 12) & 0xf;
1762 rd0 = (insn >> 16) & 0xf;
1763 rd1 = (insn >> 0) & 0xf;
1764 gen_op_iwmmxt_movq_M0_wRn(rd0);
1765 switch ((insn >> 22) & 3) {
1766 case 0:
1767 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1768 break;
1769 case 1:
1770 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1771 break;
1772 case 2:
1773 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1774 break;
1775 case 3:
1776 return 1;
1777 }
1778 gen_op_iwmmxt_movq_wRn_M0(wrd);
1779 gen_op_iwmmxt_set_mup();
1780 gen_op_iwmmxt_set_cup();
1781 break;
1782 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1783 wrd = (insn >> 12) & 0xf;
1784 rd0 = (insn >> 16) & 0xf;
1785 rd1 = (insn >> 0) & 0xf;
1786 gen_op_iwmmxt_movq_M0_wRn(rd0);
1787 switch ((insn >> 22) & 3) {
1788 case 0:
1789 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1790 break;
1791 case 1:
1792 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1793 break;
1794 case 2:
1795 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1796 break;
1797 case 3:
1798 return 1;
1799 }
1800 gen_op_iwmmxt_movq_wRn_M0(wrd);
1801 gen_op_iwmmxt_set_mup();
1802 gen_op_iwmmxt_set_cup();
1803 break;
1804 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 rd1 = (insn >> 0) & 0xf;
1808 gen_op_iwmmxt_movq_M0_wRn(rd0);
1809 if (insn & (1 << 22))
1810 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1811 else
1812 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1813 if (!(insn & (1 << 20)))
1814 gen_op_iwmmxt_addl_M0_wRn(wrd);
1815 gen_op_iwmmxt_movq_wRn_M0(wrd);
1816 gen_op_iwmmxt_set_mup();
1817 break;
1818 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1819 wrd = (insn >> 12) & 0xf;
1820 rd0 = (insn >> 16) & 0xf;
1821 rd1 = (insn >> 0) & 0xf;
1822 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1823 if (insn & (1 << 21)) {
1824 if (insn & (1 << 20))
1825 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1826 else
1827 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1828 } else {
1829 if (insn & (1 << 20))
1830 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1831 else
1832 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1833 }
18c9b560
AZ
1834 gen_op_iwmmxt_movq_wRn_M0(wrd);
1835 gen_op_iwmmxt_set_mup();
1836 break;
1837 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 rd1 = (insn >> 0) & 0xf;
1841 gen_op_iwmmxt_movq_M0_wRn(rd0);
1842 if (insn & (1 << 21))
1843 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1844 else
1845 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1846 if (!(insn & (1 << 20))) {
e677137d
PB
1847 iwmmxt_load_reg(cpu_V1, wrd);
1848 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1849 }
1850 gen_op_iwmmxt_movq_wRn_M0(wrd);
1851 gen_op_iwmmxt_set_mup();
1852 break;
1853 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1854 wrd = (insn >> 12) & 0xf;
1855 rd0 = (insn >> 16) & 0xf;
1856 rd1 = (insn >> 0) & 0xf;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 switch ((insn >> 22) & 3) {
1859 case 0:
1860 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1861 break;
1862 case 1:
1863 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1864 break;
1865 case 2:
1866 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1867 break;
1868 case 3:
1869 return 1;
1870 }
1871 gen_op_iwmmxt_movq_wRn_M0(wrd);
1872 gen_op_iwmmxt_set_mup();
1873 gen_op_iwmmxt_set_cup();
1874 break;
1875 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1876 wrd = (insn >> 12) & 0xf;
1877 rd0 = (insn >> 16) & 0xf;
1878 rd1 = (insn >> 0) & 0xf;
1879 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1880 if (insn & (1 << 22)) {
1881 if (insn & (1 << 20))
1882 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1883 else
1884 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1885 } else {
1886 if (insn & (1 << 20))
1887 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1888 else
1889 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1890 }
18c9b560
AZ
1891 gen_op_iwmmxt_movq_wRn_M0(wrd);
1892 gen_op_iwmmxt_set_mup();
1893 gen_op_iwmmxt_set_cup();
1894 break;
1895 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1896 wrd = (insn >> 12) & 0xf;
1897 rd0 = (insn >> 16) & 0xf;
1898 rd1 = (insn >> 0) & 0xf;
1899 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1900 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1901 tcg_gen_andi_i32(tmp, tmp, 7);
1902 iwmmxt_load_reg(cpu_V1, rd1);
1903 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1904 tcg_temp_free_i32(tmp);
18c9b560
AZ
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 break;
1908 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1909 if (((insn >> 6) & 3) == 3)
1910 return 1;
18c9b560
AZ
1911 rd = (insn >> 12) & 0xf;
1912 wrd = (insn >> 16) & 0xf;
da6b5335 1913 tmp = load_reg(s, rd);
18c9b560
AZ
1914 gen_op_iwmmxt_movq_M0_wRn(wrd);
1915 switch ((insn >> 6) & 3) {
1916 case 0:
da6b5335
FN
1917 tmp2 = tcg_const_i32(0xff);
1918 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1919 break;
1920 case 1:
da6b5335
FN
1921 tmp2 = tcg_const_i32(0xffff);
1922 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1923 break;
1924 case 2:
da6b5335
FN
1925 tmp2 = tcg_const_i32(0xffffffff);
1926 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1927 break;
da6b5335 1928 default:
39d5492a
PM
1929 TCGV_UNUSED_I32(tmp2);
1930 TCGV_UNUSED_I32(tmp3);
18c9b560 1931 }
da6b5335 1932 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1933 tcg_temp_free_i32(tmp3);
1934 tcg_temp_free_i32(tmp2);
7d1b0095 1935 tcg_temp_free_i32(tmp);
18c9b560
AZ
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 break;
1939 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1940 rd = (insn >> 12) & 0xf;
1941 wrd = (insn >> 16) & 0xf;
da6b5335 1942 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1943 return 1;
1944 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1945 tmp = tcg_temp_new_i32();
18c9b560
AZ
1946 switch ((insn >> 22) & 3) {
1947 case 0:
da6b5335
FN
1948 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1949 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1950 if (insn & 8) {
1951 tcg_gen_ext8s_i32(tmp, tmp);
1952 } else {
1953 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1954 }
1955 break;
1956 case 1:
da6b5335
FN
1957 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1958 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1959 if (insn & 8) {
1960 tcg_gen_ext16s_i32(tmp, tmp);
1961 } else {
1962 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1963 }
1964 break;
1965 case 2:
da6b5335
FN
1966 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1967 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1968 break;
18c9b560 1969 }
da6b5335 1970 store_reg(s, rd, tmp);
18c9b560
AZ
1971 break;
1972 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1973 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1974 return 1;
da6b5335 1975 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1976 switch ((insn >> 22) & 3) {
1977 case 0:
da6b5335 1978 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1979 break;
1980 case 1:
da6b5335 1981 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1982 break;
1983 case 2:
da6b5335 1984 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1985 break;
18c9b560 1986 }
da6b5335
FN
1987 tcg_gen_shli_i32(tmp, tmp, 28);
1988 gen_set_nzcv(tmp);
7d1b0095 1989 tcg_temp_free_i32(tmp);
18c9b560
AZ
1990 break;
1991 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1992 if (((insn >> 6) & 3) == 3)
1993 return 1;
18c9b560
AZ
1994 rd = (insn >> 12) & 0xf;
1995 wrd = (insn >> 16) & 0xf;
da6b5335 1996 tmp = load_reg(s, rd);
18c9b560
AZ
1997 switch ((insn >> 6) & 3) {
1998 case 0:
da6b5335 1999 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2000 break;
2001 case 1:
da6b5335 2002 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2003 break;
2004 case 2:
da6b5335 2005 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2006 break;
18c9b560 2007 }
7d1b0095 2008 tcg_temp_free_i32(tmp);
18c9b560
AZ
2009 gen_op_iwmmxt_movq_wRn_M0(wrd);
2010 gen_op_iwmmxt_set_mup();
2011 break;
2012 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2013 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2014 return 1;
da6b5335 2015 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2016 tmp2 = tcg_temp_new_i32();
da6b5335 2017 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2018 switch ((insn >> 22) & 3) {
2019 case 0:
2020 for (i = 0; i < 7; i ++) {
da6b5335
FN
2021 tcg_gen_shli_i32(tmp2, tmp2, 4);
2022 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2023 }
2024 break;
2025 case 1:
2026 for (i = 0; i < 3; i ++) {
da6b5335
FN
2027 tcg_gen_shli_i32(tmp2, tmp2, 8);
2028 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2029 }
2030 break;
2031 case 2:
da6b5335
FN
2032 tcg_gen_shli_i32(tmp2, tmp2, 16);
2033 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2034 break;
18c9b560 2035 }
da6b5335 2036 gen_set_nzcv(tmp);
7d1b0095
PM
2037 tcg_temp_free_i32(tmp2);
2038 tcg_temp_free_i32(tmp);
18c9b560
AZ
2039 break;
2040 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2041 wrd = (insn >> 12) & 0xf;
2042 rd0 = (insn >> 16) & 0xf;
2043 gen_op_iwmmxt_movq_M0_wRn(rd0);
2044 switch ((insn >> 22) & 3) {
2045 case 0:
e677137d 2046 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2047 break;
2048 case 1:
e677137d 2049 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2050 break;
2051 case 2:
e677137d 2052 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2053 break;
2054 case 3:
2055 return 1;
2056 }
2057 gen_op_iwmmxt_movq_wRn_M0(wrd);
2058 gen_op_iwmmxt_set_mup();
2059 break;
2060 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2061 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2062 return 1;
da6b5335 2063 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2064 tmp2 = tcg_temp_new_i32();
da6b5335 2065 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2066 switch ((insn >> 22) & 3) {
2067 case 0:
2068 for (i = 0; i < 7; i ++) {
da6b5335
FN
2069 tcg_gen_shli_i32(tmp2, tmp2, 4);
2070 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2071 }
2072 break;
2073 case 1:
2074 for (i = 0; i < 3; i ++) {
da6b5335
FN
2075 tcg_gen_shli_i32(tmp2, tmp2, 8);
2076 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2077 }
2078 break;
2079 case 2:
da6b5335
FN
2080 tcg_gen_shli_i32(tmp2, tmp2, 16);
2081 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2082 break;
18c9b560 2083 }
da6b5335 2084 gen_set_nzcv(tmp);
7d1b0095
PM
2085 tcg_temp_free_i32(tmp2);
2086 tcg_temp_free_i32(tmp);
18c9b560
AZ
2087 break;
2088 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2089 rd = (insn >> 12) & 0xf;
2090 rd0 = (insn >> 16) & 0xf;
da6b5335 2091 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2092 return 1;
2093 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2094 tmp = tcg_temp_new_i32();
18c9b560
AZ
2095 switch ((insn >> 22) & 3) {
2096 case 0:
da6b5335 2097 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2098 break;
2099 case 1:
da6b5335 2100 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2101 break;
2102 case 2:
da6b5335 2103 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2104 break;
18c9b560 2105 }
da6b5335 2106 store_reg(s, rd, tmp);
18c9b560
AZ
2107 break;
2108 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2109 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2110 wrd = (insn >> 12) & 0xf;
2111 rd0 = (insn >> 16) & 0xf;
2112 rd1 = (insn >> 0) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 switch ((insn >> 22) & 3) {
2115 case 0:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2118 else
2119 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2120 break;
2121 case 1:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2126 break;
2127 case 2:
2128 if (insn & (1 << 21))
2129 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2130 else
2131 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2132 break;
2133 case 3:
2134 return 1;
2135 }
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2141 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_unpacklsb_M0();
2149 else
2150 gen_op_iwmmxt_unpacklub_M0();
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_unpacklsw_M0();
2155 else
2156 gen_op_iwmmxt_unpackluw_M0();
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_unpacklsl_M0();
2161 else
2162 gen_op_iwmmxt_unpacklul_M0();
2163 break;
2164 case 3:
2165 return 1;
2166 }
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2172 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpackhsb_M0();
2180 else
2181 gen_op_iwmmxt_unpackhub_M0();
2182 break;
2183 case 1:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpackhsw_M0();
2186 else
2187 gen_op_iwmmxt_unpackhuw_M0();
2188 break;
2189 case 2:
2190 if (insn & (1 << 21))
2191 gen_op_iwmmxt_unpackhsl_M0();
2192 else
2193 gen_op_iwmmxt_unpackhul_M0();
2194 break;
2195 case 3:
2196 return 1;
2197 }
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2201 break;
2202 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2203 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2204 if (((insn >> 22) & 3) == 0)
2205 return 1;
18c9b560
AZ
2206 wrd = (insn >> 12) & 0xf;
2207 rd0 = (insn >> 16) & 0xf;
2208 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2209 tmp = tcg_temp_new_i32();
da6b5335 2210 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2211 tcg_temp_free_i32(tmp);
18c9b560 2212 return 1;
da6b5335 2213 }
18c9b560 2214 switch ((insn >> 22) & 3) {
18c9b560 2215 case 1:
477955bd 2216 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2217 break;
2218 case 2:
477955bd 2219 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2220 break;
2221 case 3:
477955bd 2222 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2223 break;
2224 }
7d1b0095 2225 tcg_temp_free_i32(tmp);
18c9b560
AZ
2226 gen_op_iwmmxt_movq_wRn_M0(wrd);
2227 gen_op_iwmmxt_set_mup();
2228 gen_op_iwmmxt_set_cup();
2229 break;
2230 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2231 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2232 if (((insn >> 22) & 3) == 0)
2233 return 1;
18c9b560
AZ
2234 wrd = (insn >> 12) & 0xf;
2235 rd0 = (insn >> 16) & 0xf;
2236 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2237 tmp = tcg_temp_new_i32();
da6b5335 2238 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2239 tcg_temp_free_i32(tmp);
18c9b560 2240 return 1;
da6b5335 2241 }
18c9b560 2242 switch ((insn >> 22) & 3) {
18c9b560 2243 case 1:
477955bd 2244 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2245 break;
2246 case 2:
477955bd 2247 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2248 break;
2249 case 3:
477955bd 2250 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2251 break;
2252 }
7d1b0095 2253 tcg_temp_free_i32(tmp);
18c9b560
AZ
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2259 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2260 if (((insn >> 22) & 3) == 0)
2261 return 1;
18c9b560
AZ
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2265 tmp = tcg_temp_new_i32();
da6b5335 2266 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2267 tcg_temp_free_i32(tmp);
18c9b560 2268 return 1;
da6b5335 2269 }
18c9b560 2270 switch ((insn >> 22) & 3) {
18c9b560 2271 case 1:
477955bd 2272 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2273 break;
2274 case 2:
477955bd 2275 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2276 break;
2277 case 3:
477955bd 2278 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2279 break;
2280 }
7d1b0095 2281 tcg_temp_free_i32(tmp);
18c9b560
AZ
2282 gen_op_iwmmxt_movq_wRn_M0(wrd);
2283 gen_op_iwmmxt_set_mup();
2284 gen_op_iwmmxt_set_cup();
2285 break;
2286 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2287 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2288 if (((insn >> 22) & 3) == 0)
2289 return 1;
18c9b560
AZ
2290 wrd = (insn >> 12) & 0xf;
2291 rd0 = (insn >> 16) & 0xf;
2292 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2293 tmp = tcg_temp_new_i32();
18c9b560 2294 switch ((insn >> 22) & 3) {
18c9b560 2295 case 1:
da6b5335 2296 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2297 tcg_temp_free_i32(tmp);
18c9b560 2298 return 1;
da6b5335 2299 }
477955bd 2300 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2301 break;
2302 case 2:
da6b5335 2303 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2304 tcg_temp_free_i32(tmp);
18c9b560 2305 return 1;
da6b5335 2306 }
477955bd 2307 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2308 break;
2309 case 3:
da6b5335 2310 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2311 tcg_temp_free_i32(tmp);
18c9b560 2312 return 1;
da6b5335 2313 }
477955bd 2314 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2315 break;
2316 }
7d1b0095 2317 tcg_temp_free_i32(tmp);
18c9b560
AZ
2318 gen_op_iwmmxt_movq_wRn_M0(wrd);
2319 gen_op_iwmmxt_set_mup();
2320 gen_op_iwmmxt_set_cup();
2321 break;
2322 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2323 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2324 wrd = (insn >> 12) & 0xf;
2325 rd0 = (insn >> 16) & 0xf;
2326 rd1 = (insn >> 0) & 0xf;
2327 gen_op_iwmmxt_movq_M0_wRn(rd0);
2328 switch ((insn >> 22) & 3) {
2329 case 0:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_minub_M0_wRn(rd1);
2334 break;
2335 case 1:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2340 break;
2341 case 2:
2342 if (insn & (1 << 21))
2343 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2344 else
2345 gen_op_iwmmxt_minul_M0_wRn(rd1);
2346 break;
2347 case 3:
2348 return 1;
2349 }
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 break;
2353 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2354 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 22) & 3) {
2360 case 0:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2365 break;
2366 case 1:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2371 break;
2372 case 2:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2377 break;
2378 case 3:
2379 return 1;
2380 }
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 break;
2384 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2385 case 0x402: case 0x502: case 0x602: case 0x702:
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2390 tmp = tcg_const_i32((insn >> 20) & 3);
2391 iwmmxt_load_reg(cpu_V1, rd1);
2392 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2393 tcg_temp_free_i32(tmp);
18c9b560
AZ
2394 gen_op_iwmmxt_movq_wRn_M0(wrd);
2395 gen_op_iwmmxt_set_mup();
2396 break;
2397 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2398 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2399 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2400 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2401 wrd = (insn >> 12) & 0xf;
2402 rd0 = (insn >> 16) & 0xf;
2403 rd1 = (insn >> 0) & 0xf;
2404 gen_op_iwmmxt_movq_M0_wRn(rd0);
2405 switch ((insn >> 20) & 0xf) {
2406 case 0x0:
2407 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2408 break;
2409 case 0x1:
2410 gen_op_iwmmxt_subub_M0_wRn(rd1);
2411 break;
2412 case 0x3:
2413 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2414 break;
2415 case 0x4:
2416 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2417 break;
2418 case 0x5:
2419 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2420 break;
2421 case 0x7:
2422 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2423 break;
2424 case 0x8:
2425 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2426 break;
2427 case 0x9:
2428 gen_op_iwmmxt_subul_M0_wRn(rd1);
2429 break;
2430 case 0xb:
2431 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2432 break;
2433 default:
2434 return 1;
2435 }
2436 gen_op_iwmmxt_movq_wRn_M0(wrd);
2437 gen_op_iwmmxt_set_mup();
2438 gen_op_iwmmxt_set_cup();
2439 break;
2440 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2441 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2442 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2443 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2444 wrd = (insn >> 12) & 0xf;
2445 rd0 = (insn >> 16) & 0xf;
2446 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2447 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2448 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2449 tcg_temp_free_i32(tmp);
18c9b560
AZ
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 gen_op_iwmmxt_set_cup();
2453 break;
2454 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2455 case 0x418: case 0x518: case 0x618: case 0x718:
2456 case 0x818: case 0x918: case 0xa18: case 0xb18:
2457 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 rd1 = (insn >> 0) & 0xf;
2461 gen_op_iwmmxt_movq_M0_wRn(rd0);
2462 switch ((insn >> 20) & 0xf) {
2463 case 0x0:
2464 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2465 break;
2466 case 0x1:
2467 gen_op_iwmmxt_addub_M0_wRn(rd1);
2468 break;
2469 case 0x3:
2470 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2471 break;
2472 case 0x4:
2473 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2474 break;
2475 case 0x5:
2476 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2477 break;
2478 case 0x7:
2479 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2480 break;
2481 case 0x8:
2482 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2483 break;
2484 case 0x9:
2485 gen_op_iwmmxt_addul_M0_wRn(rd1);
2486 break;
2487 case 0xb:
2488 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2489 break;
2490 default:
2491 return 1;
2492 }
2493 gen_op_iwmmxt_movq_wRn_M0(wrd);
2494 gen_op_iwmmxt_set_mup();
2495 gen_op_iwmmxt_set_cup();
2496 break;
2497 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2498 case 0x408: case 0x508: case 0x608: case 0x708:
2499 case 0x808: case 0x908: case 0xa08: case 0xb08:
2500 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2501 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2502 return 1;
18c9b560
AZ
2503 wrd = (insn >> 12) & 0xf;
2504 rd0 = (insn >> 16) & 0xf;
2505 rd1 = (insn >> 0) & 0xf;
2506 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2507 switch ((insn >> 22) & 3) {
18c9b560
AZ
2508 case 1:
2509 if (insn & (1 << 21))
2510 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2511 else
2512 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2513 break;
2514 case 2:
2515 if (insn & (1 << 21))
2516 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2517 else
2518 gen_op_iwmmxt_packul_M0_wRn(rd1);
2519 break;
2520 case 3:
2521 if (insn & (1 << 21))
2522 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2523 else
2524 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2525 break;
2526 }
2527 gen_op_iwmmxt_movq_wRn_M0(wrd);
2528 gen_op_iwmmxt_set_mup();
2529 gen_op_iwmmxt_set_cup();
2530 break;
2531 case 0x201: case 0x203: case 0x205: case 0x207:
2532 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2533 case 0x211: case 0x213: case 0x215: case 0x217:
2534 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2535 wrd = (insn >> 5) & 0xf;
2536 rd0 = (insn >> 12) & 0xf;
2537 rd1 = (insn >> 0) & 0xf;
2538 if (rd0 == 0xf || rd1 == 0xf)
2539 return 1;
2540 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2541 tmp = load_reg(s, rd0);
2542 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2543 switch ((insn >> 16) & 0xf) {
2544 case 0x0: /* TMIA */
da6b5335 2545 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2546 break;
2547 case 0x8: /* TMIAPH */
da6b5335 2548 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2549 break;
2550 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2551 if (insn & (1 << 16))
da6b5335 2552 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2553 if (insn & (1 << 17))
da6b5335
FN
2554 tcg_gen_shri_i32(tmp2, tmp2, 16);
2555 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2556 break;
2557 default:
7d1b0095
PM
2558 tcg_temp_free_i32(tmp2);
2559 tcg_temp_free_i32(tmp);
18c9b560
AZ
2560 return 1;
2561 }
7d1b0095
PM
2562 tcg_temp_free_i32(tmp2);
2563 tcg_temp_free_i32(tmp);
18c9b560
AZ
2564 gen_op_iwmmxt_movq_wRn_M0(wrd);
2565 gen_op_iwmmxt_set_mup();
2566 break;
2567 default:
2568 return 1;
2569 }
2570
2571 return 0;
2572}
2573
a1c7273b 2574/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2575 (ie. an undefined instruction). */
7dcc1f89 2576static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2577{
2578 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2579 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2580
2581 if ((insn & 0x0ff00f10) == 0x0e200010) {
2582 /* Multiply with Internal Accumulate Format */
2583 rd0 = (insn >> 12) & 0xf;
2584 rd1 = insn & 0xf;
2585 acc = (insn >> 5) & 7;
2586
2587 if (acc != 0)
2588 return 1;
2589
3a554c0f
FN
2590 tmp = load_reg(s, rd0);
2591 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2592 switch ((insn >> 16) & 0xf) {
2593 case 0x0: /* MIA */
3a554c0f 2594 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2595 break;
2596 case 0x8: /* MIAPH */
3a554c0f 2597 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2598 break;
2599 case 0xc: /* MIABB */
2600 case 0xd: /* MIABT */
2601 case 0xe: /* MIATB */
2602 case 0xf: /* MIATT */
18c9b560 2603 if (insn & (1 << 16))
3a554c0f 2604 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2605 if (insn & (1 << 17))
3a554c0f
FN
2606 tcg_gen_shri_i32(tmp2, tmp2, 16);
2607 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2608 break;
2609 default:
2610 return 1;
2611 }
7d1b0095
PM
2612 tcg_temp_free_i32(tmp2);
2613 tcg_temp_free_i32(tmp);
18c9b560
AZ
2614
2615 gen_op_iwmmxt_movq_wRn_M0(acc);
2616 return 0;
2617 }
2618
2619 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2620 /* Internal Accumulator Access Format */
2621 rdhi = (insn >> 16) & 0xf;
2622 rdlo = (insn >> 12) & 0xf;
2623 acc = insn & 7;
2624
2625 if (acc != 0)
2626 return 1;
2627
2628 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2629 iwmmxt_load_reg(cpu_V0, acc);
2630 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2631 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2632 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2633 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2634 } else { /* MAR */
3a554c0f
FN
2635 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2636 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2637 }
2638 return 0;
2639 }
2640
2641 return 1;
2642}
2643
/* Shift x right by n bits; a negative n shifts left instead, so the
 * same macro can move a field from either side of the target position.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision VFP register number: 4 bits at 'bigbit'
 * plus a low bit at 'smallbit'.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number into 'reg'.  With VFP3
 * there are 32 D registers (extra high bit at 'smallbit'); without it
 * only 16 exist and a set 'smallbit' makes the insn UNDEF, hence the
 * embedded 'return 1' (this macro may only be used inside the decoders).
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Accessors for the D (destination), N and M (source) register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2663
4373f3ce 2664/* Move between integer and VFP cores. */
39d5492a 2665static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2666{
39d5492a 2667 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2668 tcg_gen_mov_i32(tmp, cpu_F0s);
2669 return tmp;
2670}
2671
39d5492a 2672static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2673{
2674 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2675 tcg_temp_free_i32(tmp);
4373f3ce
PB
2676}
2677
39d5492a 2678static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2679{
39d5492a 2680 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2681 if (shift)
2682 tcg_gen_shri_i32(var, var, shift);
86831435 2683 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2684 tcg_gen_shli_i32(tmp, var, 8);
2685 tcg_gen_or_i32(var, var, tmp);
2686 tcg_gen_shli_i32(tmp, var, 16);
2687 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2688 tcg_temp_free_i32(tmp);
ad69471c
PB
2689}
2690
39d5492a 2691static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2692{
39d5492a 2693 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2694 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2695 tcg_gen_shli_i32(tmp, var, 16);
2696 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2697 tcg_temp_free_i32(tmp);
ad69471c
PB
2698}
2699
39d5492a 2700static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2701{
39d5492a 2702 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2703 tcg_gen_andi_i32(var, var, 0xffff0000);
2704 tcg_gen_shri_i32(tmp, var, 16);
2705 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2706 tcg_temp_free_i32(tmp);
ad69471c
PB
2707}
2708
39d5492a 2709static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2710{
2711 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2712 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2713 switch (size) {
2714 case 0:
6ce2faf4 2715 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2716 gen_neon_dup_u8(tmp, 0);
2717 break;
2718 case 1:
6ce2faf4 2719 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2720 gen_neon_dup_low16(tmp);
2721 break;
2722 case 2:
6ce2faf4 2723 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2724 break;
2725 default: /* Avoid compiler warnings. */
2726 abort();
2727 }
2728 return tmp;
2729}
2730
/* Generate code for a VSEL instruction: rd = cond ? rn : rm, with the
 * condition (bits [21:20]) evaluated against the saved flag state in
 * cpu_ZF/cpu_NF/cpu_VF.  'dp' selects double precision.  Returns 0
 * (the caller has already matched a valid encoding).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flags so 64-bit movcond can test them.  NF
         * and VF are sign-extended because their tests look at the sign
         * (LT/GE against zero); ZF only needs zero/non-zero.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two-step select: first on !Z, then keep that result only
             * if N == V also holds, otherwise fall back to frm.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Same two-step select as the double-precision path.  */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2839
40cfacdd
WN
2840static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2841 uint32_t rm, uint32_t dp)
2842{
2843 uint32_t vmin = extract32(insn, 6, 1);
2844 TCGv_ptr fpst = get_fpstatus_ptr(0);
2845
2846 if (dp) {
2847 TCGv_i64 frn, frm, dest;
2848
2849 frn = tcg_temp_new_i64();
2850 frm = tcg_temp_new_i64();
2851 dest = tcg_temp_new_i64();
2852
2853 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2854 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2855 if (vmin) {
f71a2ae5 2856 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2857 } else {
f71a2ae5 2858 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2859 }
2860 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2861 tcg_temp_free_i64(frn);
2862 tcg_temp_free_i64(frm);
2863 tcg_temp_free_i64(dest);
2864 } else {
2865 TCGv_i32 frn, frm, dest;
2866
2867 frn = tcg_temp_new_i32();
2868 frm = tcg_temp_new_i32();
2869 dest = tcg_temp_new_i32();
2870
2871 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2872 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2873 if (vmin) {
f71a2ae5 2874 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2875 } else {
f71a2ae5 2876 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2877 }
2878 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2879 tcg_temp_free_i32(frn);
2880 tcg_temp_free_i32(frm);
2881 tcg_temp_free_i32(dest);
2882 }
2883
2884 tcg_temp_free_ptr(fpst);
2885 return 0;
2886}
2887
7655f39b
WN
2888static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2889 int rounding)
2890{
2891 TCGv_ptr fpst = get_fpstatus_ptr(0);
2892 TCGv_i32 tcg_rmode;
2893
2894 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2895 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2896
2897 if (dp) {
2898 TCGv_i64 tcg_op;
2899 TCGv_i64 tcg_res;
2900 tcg_op = tcg_temp_new_i64();
2901 tcg_res = tcg_temp_new_i64();
2902 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2903 gen_helper_rintd(tcg_res, tcg_op, fpst);
2904 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2905 tcg_temp_free_i64(tcg_op);
2906 tcg_temp_free_i64(tcg_res);
2907 } else {
2908 TCGv_i32 tcg_op;
2909 TCGv_i32 tcg_res;
2910 tcg_op = tcg_temp_new_i32();
2911 tcg_res = tcg_temp_new_i32();
2912 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2913 gen_helper_rints(tcg_res, tcg_op, fpst);
2914 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2915 tcg_temp_free_i32(tcg_op);
2916 tcg_temp_free_i32(tcg_res);
2917 }
2918
2919 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2920 tcg_temp_free_i32(tcg_rmode);
2921
2922 tcg_temp_free_ptr(fpst);
2923 return 0;
2924}
2925
/* Generate code for VCVT{A,N,P,M}: FP-to-integer conversion with an
 * explicitly encoded rounding mode (instead of the FPSCR one), from
 * VFP register rm to a single-precision destination rd.  Bit 7 of the
 * insn selects a signed result.  Returns 0 (encoding already matched).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Fixed-point shift of zero: a plain float-to-int conversion.  */
    tcg_shift = tcg_const_i32(0);

    /* Install the requested rounding mode; set_rmode swaps the previous
     * mode into tcg_rmode so it can be restored below.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The helper produces a 64-bit value; only the low 32 bits are
         * the result, stored to a single-precision register.
         */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode.  */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2983
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,    /* RM field 0b00: to nearest, ties away from 0 */
    FPROUNDING_TIEEVEN,    /* RM field 0b01: to nearest, ties to even */
    FPROUNDING_POSINF,     /* RM field 0b10: towards +infinity */
    FPROUNDING_NEGINF,     /* RM field 0b11: towards -infinity */
};
2994
/* Decode the VFP encodings that exist only in the v8 "unconditional"
 * (ARM cond == 0xf / Thumb T=1) space: VSEL, VMAXNM/VMINNM,
 * VRINT{A,N,P,M} and VCVT{A,N,P,M}.
 * Returns 0 on success, nonzero to UNDEF.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    /* Bit 8 selects double precision (dp == 1) vs single. */
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        /* VSEL */
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        /* VMAXNM, VMINNM */
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3028
a1c7273b 3029/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3030 (ie. an undefined instruction). */
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Handles the whole legacy VFP space: single register transfers
   (VMOV core<->VFP, VMRS/VMSR), data-processing ops (including the
   short-vector iteration controlled by FPSCR.LEN/STRIDE), two-register
   transfers, and load/store (single and multiple). */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Bits [11:8] == 0xb selects double precision. */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* Transfer to/from one lane of a D register (Neon VMOV/VDUP
                 * scalar forms share this encoding).
                 */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;                       /* 8-bit element */
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;                       /* 16-bit element */
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;                       /* 32-bit element */
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);          /* unsigned extract */
                        else
                            gen_sxtb(tmp);          /* signed extract */
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV: insert the core value into one element,
                         * preserving the rest of the vector register.
                         */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register (VMRS) */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* VMRS APSR_nzcv: only the flag bits. */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register (VMSR) */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR write can change vec_len/vec_stride:
                             * end the TB so translation flags are refetched.
                             */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector iteration count from FPSCR.LEN (cached in
             * the DisasContext); compare/convert ops never vectorize.
             */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands into the F0/F1 working registers. */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the VFP3 8-bit immediate into a full-width
                     * floating point constant (sign, biased exponent,
                     * 4-bit mantissa).
                     */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);  /* bottom half */
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16); /* top half */
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into bottom half of rd, top half kept. */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into top half of rd, bottom half kept. */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        /* Temporarily force round-to-zero; the second
                         * set_rmode call swaps the old mode back.
                         */
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3948
/* Emit a jump to 'dest': a direct chained goto_tb when the target lies
 * in the same guest page as the current TB (so the chaining link stays
 * valid), otherwise a plain exit to the main loop with the PC updated.
 * 'n' selects which of the TB's two chaining slots to use.
 */
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        /* Exit value encodes the TB pointer plus the chaining slot. */
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        gen_set_pc_im(s, dest);
        /* Exit value 0: no chaining, main loop looks up the next TB. */
        tcg_gen_exit_tb(0);
    }
}
3963
/* Emit an unconditional jump to 'dest'.  When single-stepping (either
 * gdbstub singlestep or architectural SS) we must not chain TBs, so an
 * indirect branch is used to guarantee the debug exception fires.
 */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled || s->ss_active)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;   /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3976
39d5492a 3977static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3978{
ee097184 3979 if (x)
d9ba4830 3980 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3981 else
d9ba4830 3982 gen_sxth(t0);
ee097184 3983 if (y)
d9ba4830 3984 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3985 else
d9ba4830
PB
3986 gen_sxth(t1);
3987 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3988}
3989
3990/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
3991static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
3992{
b5ff1b31
FB
3993 uint32_t mask;
3994
3995 mask = 0;
3996 if (flags & (1 << 0))
3997 mask |= 0xff;
3998 if (flags & (1 << 1))
3999 mask |= 0xff00;
4000 if (flags & (1 << 2))
4001 mask |= 0xff0000;
4002 if (flags & (1 << 3))
4003 mask |= 0xff000000;
9ee6e8bb 4004
2ae23e75 4005 /* Mask out undefined bits. */
9ee6e8bb 4006 mask &= ~CPSR_RESERVED;
d614a513 4007 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4008 mask &= ~CPSR_T;
d614a513
PM
4009 }
4010 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4011 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4012 }
4013 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4014 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4015 }
4016 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4017 mask &= ~CPSR_IT;
d614a513 4018 }
4051e12c
PM
4019 /* Mask out execution state and reserved bits. */
4020 if (!spsr) {
4021 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4022 }
b5ff1b31
FB
4023 /* Mask out privileged bits. */
4024 if (IS_USER(s))
9ee6e8bb 4025 mask &= CPSR_USER;
b5ff1b31
FB
4026 return mask;
4027}
4028
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * For spsr != 0 the masked bits of t0 are merged into the banked SPSR
 * (read-modify-write); otherwise they are written to the CPSR via
 * gen_set_cpsr.  Ends the TB since CPSR changes can affect translation.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* SPSR = (SPSR & ~mask) | (t0 & mask) */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
4050
2fbac54b
FN
4051/* Returns nonzero if access to the PSR is not permitted. */
4052static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4053{
39d5492a 4054 TCGv_i32 tmp;
7d1b0095 4055 tmp = tcg_temp_new_i32();
2fbac54b
FN
4056 tcg_gen_movi_i32(tmp, val);
4057 return gen_set_psr(s, mask, spsr, tmp);
4058}
4059
/* Generate an old-style exception return. Marks pc as dead.
 * Stores the new PC and restores the CPSR from the SPSR, then forces
 * the translator to stop (DISAS_UPDATE) since CPU state has changed.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, CPSR_ERET_MASK);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_UPDATE;
}
4070
/* Generate a v6 exception return.  Marks both values as dead.
 * 'cpsr' is written to the CPSR (with the exception-return mask) and
 * 'pc' becomes the new program counter; translation then stops.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 4079
9ee6e8bb
PB
4080static void gen_nop_hint(DisasContext *s, int val)
4081{
4082 switch (val) {
4083 case 3: /* wfi */
eaed129d 4084 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4085 s->is_jmp = DISAS_WFI;
4086 break;
4087 case 2: /* wfe */
72c1d3af
PM
4088 gen_set_pc_im(s, s->pc);
4089 s->is_jmp = DISAS_WFE;
4090 break;
9ee6e8bb 4091 case 4: /* sev */
12b10571
MR
4092 case 5: /* sevl */
4093 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4094 default: /* nop */
4095 break;
4096 }
4097}
99c475ab 4098
/* Shorthand operand list: destination cpu_V0, sources cpu_V0 and cpu_V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4100
39d5492a 4101static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4102{
4103 switch (size) {
dd8fbd78
FN
4104 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4105 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4106 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4107 default: abort();
9ee6e8bb 4108 }
9ee6e8bb
PB
4109}
4110
39d5492a 4111static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4112{
4113 switch (size) {
dd8fbd78
FN
4114 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4115 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4116 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4117 default: return;
4118 }
4119}
4120
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Call the signed/unsigned 8/16/32-bit NEON helper selected by the
 * enclosing function's 'size' and 'u' locals, passing cpu_env; operands
 * are the enclosing 'tmp' and 'tmp2' temporaries.  Expands to
 * "return 1" (UNDEF) for the invalid size==3 encodings, so it may only
 * be used inside a function returning int.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take
 * cpu_env.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4172
39d5492a 4173static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4174{
39d5492a 4175 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4176 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4177 return tmp;
9ee6e8bb
PB
4178}
4179
39d5492a 4180static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4181{
dd8fbd78 4182 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4183 tcg_temp_free_i32(var);
9ee6e8bb
PB
4184}
4185
39d5492a 4186static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4187{
39d5492a 4188 TCGv_i32 tmp;
9ee6e8bb 4189 if (size == 1) {
0fad6efc
PM
4190 tmp = neon_load_reg(reg & 7, reg >> 4);
4191 if (reg & 8) {
dd8fbd78 4192 gen_neon_dup_high16(tmp);
0fad6efc
PM
4193 } else {
4194 gen_neon_dup_low16(tmp);
dd8fbd78 4195 }
0fad6efc
PM
4196 } else {
4197 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4198 }
dd8fbd78 4199 return tmp;
9ee6e8bb
PB
4200}
4201
02acedf9 4202static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4203{
39d5492a 4204 TCGv_i32 tmp, tmp2;
600b828c 4205 if (!q && size == 2) {
02acedf9
PM
4206 return 1;
4207 }
4208 tmp = tcg_const_i32(rd);
4209 tmp2 = tcg_const_i32(rm);
4210 if (q) {
4211 switch (size) {
4212 case 0:
02da0b2d 4213 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4214 break;
4215 case 1:
02da0b2d 4216 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4217 break;
4218 case 2:
02da0b2d 4219 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4220 break;
4221 default:
4222 abort();
4223 }
4224 } else {
4225 switch (size) {
4226 case 0:
02da0b2d 4227 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4228 break;
4229 case 1:
02da0b2d 4230 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4231 break;
4232 default:
4233 abort();
4234 }
4235 }
4236 tcg_temp_free_i32(tmp);
4237 tcg_temp_free_i32(tmp2);
4238 return 0;
19457615
FN
4239}
4240
d68a6f3a 4241static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4242{
39d5492a 4243 TCGv_i32 tmp, tmp2;
600b828c 4244 if (!q && size == 2) {
d68a6f3a
PM
4245 return 1;
4246 }
4247 tmp = tcg_const_i32(rd);
4248 tmp2 = tcg_const_i32(rm);
4249 if (q) {
4250 switch (size) {
4251 case 0:
02da0b2d 4252 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4253 break;
4254 case 1:
02da0b2d 4255 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4256 break;
4257 case 2:
02da0b2d 4258 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4259 break;
4260 default:
4261 abort();
4262 }
4263 } else {
4264 switch (size) {
4265 case 0:
02da0b2d 4266 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4267 break;
4268 case 1:
02da0b2d 4269 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4270 break;
4271 default:
4272 abort();
4273 }
4274 }
4275 tcg_temp_free_i32(tmp);
4276 tcg_temp_free_i32(tmp2);
4277 return 0;
19457615
FN
4278}
4279
39d5492a 4280static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4281{
39d5492a 4282 TCGv_i32 rd, tmp;
19457615 4283
7d1b0095
PM
4284 rd = tcg_temp_new_i32();
4285 tmp = tcg_temp_new_i32();
19457615
FN
4286
4287 tcg_gen_shli_i32(rd, t0, 8);
4288 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4289 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4290 tcg_gen_or_i32(rd, rd, tmp);
4291
4292 tcg_gen_shri_i32(t1, t1, 8);
4293 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4294 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4295 tcg_gen_or_i32(t1, t1, tmp);
4296 tcg_gen_mov_i32(t0, rd);
4297
7d1b0095
PM
4298 tcg_temp_free_i32(tmp);
4299 tcg_temp_free_i32(rd);
19457615
FN
4300}
4301
39d5492a 4302static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4303{
39d5492a 4304 TCGv_i32 rd, tmp;
19457615 4305
7d1b0095
PM
4306 rd = tcg_temp_new_i32();
4307 tmp = tcg_temp_new_i32();
19457615
FN
4308
4309 tcg_gen_shli_i32(rd, t0, 16);
4310 tcg_gen_andi_i32(tmp, t1, 0xffff);
4311 tcg_gen_or_i32(rd, rd, tmp);
4312 tcg_gen_shri_i32(t1, t1, 16);
4313 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4314 tcg_gen_or_i32(t1, t1, tmp);
4315 tcg_gen_mov_i32(t0, rd);
4316
7d1b0095
PM
4317 tcg_temp_free_i32(tmp);
4318 tcg_temp_free_i32(rd);
19457615
FN
4319}
4320
4321
9ee6e8bb
PB
/* Per-op layout for NEON "load/store multiple structures" (VLD1-4 /
 * VST1-4): register count, element interleave factor and D-register
 * spacing, indexed by the instruction "type" field (valid ops 0..10).
 * Fix: declared const -- the table is only ever read.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4339
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid (UNDEF).  Handles both "multiple structures"
   (bit 23 clear) and "single structure" (bit 23 set) forms, including
   the load-to-all-lanes variants, and the post-index writeback when
   rm != 15. */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements (VLDn/VSTn multiple structures).  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Re-derive the start address for interleaved layouts.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* One whole 64-bit D register per transfer.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements assembled per pass.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit elements assembled per pass.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element to one lane.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the target lane.  */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-index writeback: rm==13 means immediate (transfer size),
     * otherwise add register rm; rm==15 means no writeback.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4668
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
 * Computed as dest = (t AND c) OR (f AND NOT c), in place in t/f.
 */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4676
39d5492a 4677static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4678{
4679 switch (size) {
4680 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4681 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4682 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4683 default: abort();
4684 }
4685}
4686
39d5492a 4687static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4688{
4689 switch (size) {
02da0b2d
PM
4690 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4691 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4692 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4693 default: abort();
4694 }
4695}
4696
39d5492a 4697static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4698{
4699 switch (size) {
02da0b2d
PM
4700 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4701 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4702 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4703 default: abort();
4704 }
4705}
4706
39d5492a 4707static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4708{
4709 switch (size) {
02da0b2d
PM
4710 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4711 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4712 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4713 default: abort();
4714 }
4715}
4716
39d5492a 4717static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4718 int q, int u)
4719{
4720 if (q) {
4721 if (u) {
4722 switch (size) {
4723 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4724 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4725 default: abort();
4726 }
4727 } else {
4728 switch (size) {
4729 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4730 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4731 default: abort();
4732 }
4733 }
4734 } else {
4735 if (u) {
4736 switch (size) {
b408a9b0
CL
4737 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4738 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4739 default: abort();
4740 }
4741 } else {
4742 switch (size) {
4743 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4744 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4745 default: abort();
4746 }
4747 }
4748 }
4749}
4750
39d5492a 4751static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4752{
4753 if (u) {
4754 switch (size) {
4755 case 0: gen_helper_neon_widen_u8(dest, src); break;
4756 case 1: gen_helper_neon_widen_u16(dest, src); break;
4757 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4758 default: abort();
4759 }
4760 } else {
4761 switch (size) {
4762 case 0: gen_helper_neon_widen_s8(dest, src); break;
4763 case 1: gen_helper_neon_widen_s16(dest, src); break;
4764 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4765 default: abort();
4766 }
4767 }
7d1b0095 4768 tcg_temp_free_i32(src);
ad69471c
PB
4769}
4770
4771static inline void gen_neon_addl(int size)
4772{
4773 switch (size) {
4774 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4775 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4776 case 2: tcg_gen_add_i64(CPU_V001); break;
4777 default: abort();
4778 }
4779}
4780
4781static inline void gen_neon_subl(int size)
4782{
4783 switch (size) {
4784 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4785 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4786 case 2: tcg_gen_sub_i64(CPU_V001); break;
4787 default: abort();
4788 }
4789}
4790
a7812ae4 4791static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4792{
4793 switch (size) {
4794 case 0: gen_helper_neon_negl_u16(var, var); break;
4795 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4796 case 2:
4797 tcg_gen_neg_i64(var, var);
4798 break;
ad69471c
PB
4799 default: abort();
4800 }
4801}
4802
a7812ae4 4803static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4804{
4805 switch (size) {
02da0b2d
PM
4806 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4807 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4808 default: abort();
4809 }
4810}
4811
39d5492a
PM
4812static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4813 int size, int u)
ad69471c 4814{
a7812ae4 4815 TCGv_i64 tmp;
ad69471c
PB
4816
4817 switch ((size << 1) | u) {
4818 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4819 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4820 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4821 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4822 case 4:
4823 tmp = gen_muls_i64_i32(a, b);
4824 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4825 tcg_temp_free_i64(tmp);
ad69471c
PB
4826 break;
4827 case 5:
4828 tmp = gen_mulu_i64_i32(a, b);
4829 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4830 tcg_temp_free_i64(tmp);
ad69471c
PB
4831 break;
4832 default: abort();
4833 }
c6067f04
CL
4834
4835 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4836 Don't forget to clean them now. */
4837 if (size < 2) {
7d1b0095
PM
4838 tcg_temp_free_i32(a);
4839 tcg_temp_free_i32(b);
c6067f04 4840 }
ad69471c
PB
4841}
4842
39d5492a
PM
4843static void gen_neon_narrow_op(int op, int u, int size,
4844 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4845{
4846 if (op) {
4847 if (u) {
4848 gen_neon_unarrow_sats(size, dest, src);
4849 } else {
4850 gen_neon_narrow(size, dest, src);
4851 }
4852 } else {
4853 if (u) {
4854 gen_neon_narrow_satu(size, dest, src);
4855 } else {
4856 gen_neon_narrow_sats(size, dest, src);
4857 }
4858 }
4859}
4860
62698be3
PM
4861/* Symbolic constants for op fields for Neon 3-register same-length.
4862 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4863 * table A7-9.
4864 */
4865#define NEON_3R_VHADD 0
4866#define NEON_3R_VQADD 1
4867#define NEON_3R_VRHADD 2
4868#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4869#define NEON_3R_VHSUB 4
4870#define NEON_3R_VQSUB 5
4871#define NEON_3R_VCGT 6
4872#define NEON_3R_VCGE 7
4873#define NEON_3R_VSHL 8
4874#define NEON_3R_VQSHL 9
4875#define NEON_3R_VRSHL 10
4876#define NEON_3R_VQRSHL 11
4877#define NEON_3R_VMAX 12
4878#define NEON_3R_VMIN 13
4879#define NEON_3R_VABD 14
4880#define NEON_3R_VABA 15
4881#define NEON_3R_VADD_VSUB 16
4882#define NEON_3R_VTST_VCEQ 17
4883#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4884#define NEON_3R_VMUL 19
4885#define NEON_3R_VPMAX 20
4886#define NEON_3R_VPMIN 21
4887#define NEON_3R_VQDMULH_VQRDMULH 22
4888#define NEON_3R_VPADD 23
f1ecb913 4889#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
da97f52c 4890#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4891#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4892#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4893#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4894#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4895#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 4896#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
4897
/* Valid 'size' values per 3-reg-same op: bit n set means size==n is
 * allowed; ops whose entry is zero (unallocated) always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4932
600b828c
PM
4933/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4934 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4935 * table A7-13.
4936 */
4937#define NEON_2RM_VREV64 0
4938#define NEON_2RM_VREV32 1
4939#define NEON_2RM_VREV16 2
4940#define NEON_2RM_VPADDL 4
4941#define NEON_2RM_VPADDL_U 5
9d935509
AB
4942#define NEON_2RM_AESE 6 /* Includes AESD */
4943#define NEON_2RM_AESMC 7 /* Includes AESIMC */
600b828c
PM
4944#define NEON_2RM_VCLS 8
4945#define NEON_2RM_VCLZ 9
4946#define NEON_2RM_VCNT 10
4947#define NEON_2RM_VMVN 11
4948#define NEON_2RM_VPADAL 12
4949#define NEON_2RM_VPADAL_U 13
4950#define NEON_2RM_VQABS 14
4951#define NEON_2RM_VQNEG 15
4952#define NEON_2RM_VCGT0 16
4953#define NEON_2RM_VCGE0 17
4954#define NEON_2RM_VCEQ0 18
4955#define NEON_2RM_VCLE0 19
4956#define NEON_2RM_VCLT0 20
f1ecb913 4957#define NEON_2RM_SHA1H 21
600b828c
PM
4958#define NEON_2RM_VABS 22
4959#define NEON_2RM_VNEG 23
4960#define NEON_2RM_VCGT0_F 24
4961#define NEON_2RM_VCGE0_F 25
4962#define NEON_2RM_VCEQ0_F 26
4963#define NEON_2RM_VCLE0_F 27
4964#define NEON_2RM_VCLT0_F 28
4965#define NEON_2RM_VABS_F 30
4966#define NEON_2RM_VNEG_F 31
4967#define NEON_2RM_VSWP 32
4968#define NEON_2RM_VTRN 33
4969#define NEON_2RM_VUZP 34
4970#define NEON_2RM_VZIP 35
4971#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4972#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4973#define NEON_2RM_VSHLL 38
f1ecb913 4974#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
34f7b0a2 4975#define NEON_2RM_VRINTN 40
2ce70625 4976#define NEON_2RM_VRINTX 41
34f7b0a2
WN
4977#define NEON_2RM_VRINTA 42
4978#define NEON_2RM_VRINTZ 43
600b828c 4979#define NEON_2RM_VCVT_F16_F32 44
34f7b0a2 4980#define NEON_2RM_VRINTM 45
600b828c 4981#define NEON_2RM_VCVT_F32_F16 46
34f7b0a2 4982#define NEON_2RM_VRINTP 47
901ad525
WN
4983#define NEON_2RM_VCVTAU 48
4984#define NEON_2RM_VCVTAS 49
4985#define NEON_2RM_VCVTNU 50
4986#define NEON_2RM_VCVTNS 51
4987#define NEON_2RM_VCVTPU 52
4988#define NEON_2RM_VCVTPS 53
4989#define NEON_2RM_VCVTMU 54
4990#define NEON_2RM_VCVTMS 55
600b828c
PM
4991#define NEON_2RM_VRECPE 56
4992#define NEON_2RM_VRSQRTE 57
4993#define NEON_2RM_VRECPE_F 58
4994#define NEON_2RM_VRSQRTE_F 59
4995#define NEON_2RM_VCVT_FS 60
4996#define NEON_2RM_VCVT_FU 61
4997#define NEON_2RM_VCVT_SF 62
4998#define NEON_2RM_VCVT_UF 63
4999
5000static int neon_2rm_is_float_op(int op)
5001{
5002 /* Return true if this neon 2reg-misc op is float-to-float */
5003 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5004 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5005 op == NEON_2RM_VRINTM ||
5006 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5007 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5008}
5009
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5078
9ee6e8bb
PB
5079/* Translate a NEON data processing instruction. Return nonzero if the
5080 instruction is invalid.
ad69471c
PB
5081 We process data in a mixture of 32-bit and 64-bit chunks.
5082 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5083
7dcc1f89 5084static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5085{
5086 int op;
5087 int q;
5088 int rd, rn, rm;
5089 int size;
5090 int shift;
5091 int pass;
5092 int count;
5093 int pairwise;
5094 int u;
ca9a32e4 5095 uint32_t imm, mask;
39d5492a 5096 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5097 TCGv_i64 tmp64;
9ee6e8bb 5098
2c7ffc41
PM
5099 /* FIXME: this access check should not take precedence over UNDEF
5100 * for invalid encodings; we will generate incorrect syndrome information
5101 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5102 */
9dbbc748 5103 if (s->fp_excp_el) {
2c7ffc41 5104 gen_exception_insn(s, 4, EXCP_UDEF,
9dbbc748 5105 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
2c7ffc41
PM
5106 return 0;
5107 }
5108
5df8bac1 5109 if (!s->vfp_enabled)
9ee6e8bb
PB
5110 return 1;
5111 q = (insn & (1 << 6)) != 0;
5112 u = (insn >> 24) & 1;
5113 VFP_DREG_D(rd, insn);
5114 VFP_DREG_N(rn, insn);
5115 VFP_DREG_M(rm, insn);
5116 size = (insn >> 20) & 3;
5117 if ((insn & (1 << 23)) == 0) {
5118 /* Three register same length. */
5119 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5120 /* Catch invalid op and bad size combinations: UNDEF */
5121 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5122 return 1;
5123 }
25f84f79
PM
5124 /* All insns of this form UNDEF for either this condition or the
5125 * superset of cases "Q==1"; we catch the latter later.
5126 */
5127 if (q && ((rd | rn | rm) & 1)) {
5128 return 1;
5129 }
f1ecb913
AB
5130 /*
5131 * The SHA-1/SHA-256 3-register instructions require special treatment
5132 * here, as their size field is overloaded as an op type selector, and
5133 * they all consume their input in a single pass.
5134 */
5135 if (op == NEON_3R_SHA) {
5136 if (!q) {
5137 return 1;
5138 }
5139 if (!u) { /* SHA-1 */
d614a513 5140 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5141 return 1;
5142 }
5143 tmp = tcg_const_i32(rd);
5144 tmp2 = tcg_const_i32(rn);
5145 tmp3 = tcg_const_i32(rm);
5146 tmp4 = tcg_const_i32(size);
5147 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5148 tcg_temp_free_i32(tmp4);
5149 } else { /* SHA-256 */
d614a513 5150 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5151 return 1;
5152 }
5153 tmp = tcg_const_i32(rd);
5154 tmp2 = tcg_const_i32(rn);
5155 tmp3 = tcg_const_i32(rm);
5156 switch (size) {
5157 case 0:
5158 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5159 break;
5160 case 1:
5161 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5162 break;
5163 case 2:
5164 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5165 break;
5166 }
5167 }
5168 tcg_temp_free_i32(tmp);
5169 tcg_temp_free_i32(tmp2);
5170 tcg_temp_free_i32(tmp3);
5171 return 0;
5172 }
62698be3
PM
5173 if (size == 3 && op != NEON_3R_LOGIC) {
5174 /* 64-bit element instructions. */
9ee6e8bb 5175 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5176 neon_load_reg64(cpu_V0, rn + pass);
5177 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5178 switch (op) {
62698be3 5179 case NEON_3R_VQADD:
9ee6e8bb 5180 if (u) {
02da0b2d
PM
5181 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5182 cpu_V0, cpu_V1);
2c0262af 5183 } else {
02da0b2d
PM
5184 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5185 cpu_V0, cpu_V1);
2c0262af 5186 }
9ee6e8bb 5187 break;
62698be3 5188 case NEON_3R_VQSUB:
9ee6e8bb 5189 if (u) {
02da0b2d
PM
5190 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5191 cpu_V0, cpu_V1);
ad69471c 5192 } else {
02da0b2d
PM
5193 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5194 cpu_V0, cpu_V1);
ad69471c
PB
5195 }
5196 break;
62698be3 5197 case NEON_3R_VSHL:
ad69471c
PB
5198 if (u) {
5199 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5200 } else {
5201 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5202 }
5203 break;
62698be3 5204 case NEON_3R_VQSHL:
ad69471c 5205 if (u) {
02da0b2d
PM
5206 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5207 cpu_V1, cpu_V0);
ad69471c 5208 } else {
02da0b2d
PM
5209 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5210 cpu_V1, cpu_V0);
ad69471c
PB
5211 }
5212 break;
62698be3 5213 case NEON_3R_VRSHL:
ad69471c
PB
5214 if (u) {
5215 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5216 } else {
ad69471c
PB
5217 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5218 }
5219 break;
62698be3 5220 case NEON_3R_VQRSHL:
ad69471c 5221 if (u) {
02da0b2d
PM
5222 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5223 cpu_V1, cpu_V0);
ad69471c 5224 } else {
02da0b2d
PM
5225 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5226 cpu_V1, cpu_V0);
1e8d4eec 5227 }
9ee6e8bb 5228 break;
62698be3 5229 case NEON_3R_VADD_VSUB:
9ee6e8bb 5230 if (u) {
ad69471c 5231 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5232 } else {
ad69471c 5233 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5234 }
5235 break;
5236 default:
5237 abort();
2c0262af 5238 }
ad69471c 5239 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5240 }
9ee6e8bb 5241 return 0;
2c0262af 5242 }
25f84f79 5243 pairwise = 0;
9ee6e8bb 5244 switch (op) {
62698be3
PM
5245 case NEON_3R_VSHL:
5246 case NEON_3R_VQSHL:
5247 case NEON_3R_VRSHL:
5248 case NEON_3R_VQRSHL:
9ee6e8bb 5249 {
ad69471c
PB
5250 int rtmp;
5251 /* Shift instruction operands are reversed. */
5252 rtmp = rn;
9ee6e8bb 5253 rn = rm;
ad69471c 5254 rm = rtmp;
9ee6e8bb 5255 }
2c0262af 5256 break;
25f84f79
PM
5257 case NEON_3R_VPADD:
5258 if (u) {
5259 return 1;
5260 }
5261 /* Fall through */
62698be3
PM
5262 case NEON_3R_VPMAX:
5263 case NEON_3R_VPMIN:
9ee6e8bb 5264 pairwise = 1;
2c0262af 5265 break;
25f84f79
PM
5266 case NEON_3R_FLOAT_ARITH:
5267 pairwise = (u && size < 2); /* if VPADD (float) */
5268 break;
5269 case NEON_3R_FLOAT_MINMAX:
5270 pairwise = u; /* if VPMIN/VPMAX (float) */
5271 break;
5272 case NEON_3R_FLOAT_CMP:
5273 if (!u && size) {
5274 /* no encoding for U=0 C=1x */
5275 return 1;
5276 }
5277 break;
5278 case NEON_3R_FLOAT_ACMP:
5279 if (!u) {
5280 return 1;
5281 }
5282 break;
505935fc
WN
5283 case NEON_3R_FLOAT_MISC:
5284 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5285 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5286 return 1;
5287 }
2c0262af 5288 break;
25f84f79
PM
5289 case NEON_3R_VMUL:
5290 if (u && (size != 0)) {
5291 /* UNDEF on invalid size for polynomial subcase */
5292 return 1;
5293 }
2c0262af 5294 break;
da97f52c 5295 case NEON_3R_VFM:
d614a513 5296 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5297 return 1;
5298 }
5299 break;
9ee6e8bb 5300 default:
2c0262af 5301 break;
9ee6e8bb 5302 }
dd8fbd78 5303
25f84f79
PM
5304 if (pairwise && q) {
5305 /* All the pairwise insns UNDEF if Q is set */
5306 return 1;
5307 }
5308
9ee6e8bb
PB
5309 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5310
5311 if (pairwise) {
5312 /* Pairwise. */
a5a14945
JR
5313 if (pass < 1) {
5314 tmp = neon_load_reg(rn, 0);
5315 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5316 } else {
a5a14945
JR
5317 tmp = neon_load_reg(rm, 0);
5318 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5319 }
5320 } else {
5321 /* Elementwise. */
dd8fbd78
FN
5322 tmp = neon_load_reg(rn, pass);
5323 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5324 }
5325 switch (op) {
62698be3 5326 case NEON_3R_VHADD:
9ee6e8bb
PB
5327 GEN_NEON_INTEGER_OP(hadd);
5328 break;
62698be3 5329 case NEON_3R_VQADD:
02da0b2d 5330 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5331 break;
62698be3 5332 case NEON_3R_VRHADD:
9ee6e8bb 5333 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5334 break;
62698be3 5335 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5336 switch ((u << 2) | size) {
5337 case 0: /* VAND */
dd8fbd78 5338 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5339 break;
5340 case 1: /* BIC */
f669df27 5341 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5342 break;
5343 case 2: /* VORR */
dd8fbd78 5344 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5345 break;
5346 case 3: /* VORN */
f669df27 5347 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5348 break;
5349 case 4: /* VEOR */
dd8fbd78 5350 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5351 break;
5352 case 5: /* VBSL */
dd8fbd78
FN
5353 tmp3 = neon_load_reg(rd, pass);
5354 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5355 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5356 break;
5357 case 6: /* VBIT */
dd8fbd78
FN
5358 tmp3 = neon_load_reg(rd, pass);
5359 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5360 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5361 break;
5362 case 7: /* VBIF */
dd8fbd78
FN
5363 tmp3 = neon_load_reg(rd, pass);
5364 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5365 tcg_temp_free_i32(tmp3);
9ee6e8bb 5366 break;
2c0262af
FB
5367 }
5368 break;
62698be3 5369 case NEON_3R_VHSUB:
9ee6e8bb
PB
5370 GEN_NEON_INTEGER_OP(hsub);
5371 break;
62698be3 5372 case NEON_3R_VQSUB:
02da0b2d 5373 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5374 break;
62698be3 5375 case NEON_3R_VCGT:
9ee6e8bb
PB
5376 GEN_NEON_INTEGER_OP(cgt);
5377 break;
62698be3 5378 case NEON_3R_VCGE:
9ee6e8bb
PB
5379 GEN_NEON_INTEGER_OP(cge);
5380 break;
62698be3 5381 case NEON_3R_VSHL:
ad69471c 5382 GEN_NEON_INTEGER_OP(shl);
2c0262af 5383 break;
62698be3 5384 case NEON_3R_VQSHL:
02da0b2d 5385 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5386 break;
62698be3 5387 case NEON_3R_VRSHL:
ad69471c 5388 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5389 break;
62698be3 5390 case NEON_3R_VQRSHL:
02da0b2d 5391 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5392 break;
62698be3 5393 case NEON_3R_VMAX:
9ee6e8bb
PB
5394 GEN_NEON_INTEGER_OP(max);
5395 break;
62698be3 5396 case NEON_3R_VMIN:
9ee6e8bb
PB
5397 GEN_NEON_INTEGER_OP(min);
5398 break;
62698be3 5399 case NEON_3R_VABD:
9ee6e8bb
PB
5400 GEN_NEON_INTEGER_OP(abd);
5401 break;
62698be3 5402 case NEON_3R_VABA:
9ee6e8bb 5403 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5404 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5405 tmp2 = neon_load_reg(rd, pass);
5406 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5407 break;
62698be3 5408 case NEON_3R_VADD_VSUB:
9ee6e8bb 5409 if (!u) { /* VADD */
62698be3 5410 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5411 } else { /* VSUB */
5412 switch (size) {
dd8fbd78
FN
5413 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5414 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5415 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5416 default: abort();
9ee6e8bb
PB
5417 }
5418 }
5419 break;
62698be3 5420 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5421 if (!u) { /* VTST */
5422 switch (size) {
dd8fbd78
FN
5423 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5424 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5425 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5426 default: abort();
9ee6e8bb
PB
5427 }
5428 } else { /* VCEQ */
5429 switch (size) {
dd8fbd78
FN
5430 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5431 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5432 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5433 default: abort();
9ee6e8bb
PB
5434 }
5435 }
5436 break;
62698be3 5437 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5438 switch (size) {
dd8fbd78
FN
5439 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5440 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5441 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5442 default: abort();
9ee6e8bb 5443 }
7d1b0095 5444 tcg_temp_free_i32(tmp2);
dd8fbd78 5445 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5446 if (u) { /* VMLS */
dd8fbd78 5447 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5448 } else { /* VMLA */
dd8fbd78 5449 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5450 }
5451 break;
62698be3 5452 case NEON_3R_VMUL:
9ee6e8bb 5453 if (u) { /* polynomial */
dd8fbd78 5454 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5455 } else { /* Integer */
5456 switch (size) {
dd8fbd78
FN
5457 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5458 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5459 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5460 default: abort();
9ee6e8bb
PB
5461 }
5462 }
5463 break;
62698be3 5464 case NEON_3R_VPMAX:
9ee6e8bb
PB
5465 GEN_NEON_INTEGER_OP(pmax);
5466 break;
62698be3 5467 case NEON_3R_VPMIN:
9ee6e8bb
PB
5468 GEN_NEON_INTEGER_OP(pmin);
5469 break;
62698be3 5470 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5471 if (!u) { /* VQDMULH */
5472 switch (size) {
02da0b2d
PM
5473 case 1:
5474 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5475 break;
5476 case 2:
5477 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5478 break;
62698be3 5479 default: abort();
9ee6e8bb 5480 }
62698be3 5481 } else { /* VQRDMULH */
9ee6e8bb 5482 switch (size) {
02da0b2d
PM
5483 case 1:
5484 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5485 break;
5486 case 2:
5487 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5488 break;
62698be3 5489 default: abort();
9ee6e8bb
PB
5490 }
5491 }
5492 break;
62698be3 5493 case NEON_3R_VPADD:
9ee6e8bb 5494 switch (size) {
dd8fbd78
FN
5495 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5496 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5497 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5498 default: abort();
9ee6e8bb
PB
5499 }
5500 break;
62698be3 5501 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5502 {
5503 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5504 switch ((u << 2) | size) {
5505 case 0: /* VADD */
aa47cfdd
PM
5506 case 4: /* VPADD */
5507 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5508 break;
5509 case 2: /* VSUB */
aa47cfdd 5510 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5511 break;
5512 case 6: /* VABD */
aa47cfdd 5513 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5514 break;
5515 default:
62698be3 5516 abort();
9ee6e8bb 5517 }
aa47cfdd 5518 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5519 break;
aa47cfdd 5520 }
62698be3 5521 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5522 {
5523 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5524 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5525 if (!u) {
7d1b0095 5526 tcg_temp_free_i32(tmp2);
dd8fbd78 5527 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5528 if (size == 0) {
aa47cfdd 5529 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5530 } else {
aa47cfdd 5531 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5532 }
5533 }
aa47cfdd 5534 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5535 break;
aa47cfdd 5536 }
62698be3 5537 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5538 {
5539 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5540 if (!u) {
aa47cfdd 5541 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5542 } else {
aa47cfdd
PM
5543 if (size == 0) {
5544 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5545 } else {
5546 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5547 }
b5ff1b31 5548 }
aa47cfdd 5549 tcg_temp_free_ptr(fpstatus);
2c0262af 5550 break;
aa47cfdd 5551 }
62698be3 5552 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5553 {
5554 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5555 if (size == 0) {
5556 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5557 } else {
5558 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5559 }
5560 tcg_temp_free_ptr(fpstatus);
2c0262af 5561 break;
aa47cfdd 5562 }
62698be3 5563 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5564 {
5565 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5566 if (size == 0) {
f71a2ae5 5567 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5568 } else {
f71a2ae5 5569 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5570 }
5571 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5572 break;
aa47cfdd 5573 }
505935fc
WN
5574 case NEON_3R_FLOAT_MISC:
5575 if (u) {
5576 /* VMAXNM/VMINNM */
5577 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5578 if (size == 0) {
f71a2ae5 5579 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5580 } else {
f71a2ae5 5581 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5582 }
5583 tcg_temp_free_ptr(fpstatus);
5584 } else {
5585 if (size == 0) {
5586 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5587 } else {
5588 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5589 }
5590 }
2c0262af 5591 break;
da97f52c
PM
5592 case NEON_3R_VFM:
5593 {
5594 /* VFMA, VFMS: fused multiply-add */
5595 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5596 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5597 if (size) {
5598 /* VFMS */
5599 gen_helper_vfp_negs(tmp, tmp);
5600 }
5601 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5602 tcg_temp_free_i32(tmp3);
5603 tcg_temp_free_ptr(fpstatus);
5604 break;
5605 }
9ee6e8bb
PB
5606 default:
5607 abort();
2c0262af 5608 }
7d1b0095 5609 tcg_temp_free_i32(tmp2);
dd8fbd78 5610
9ee6e8bb
PB
5611 /* Save the result. For elementwise operations we can put it
5612 straight into the destination register. For pairwise operations
5613 we have to be careful to avoid clobbering the source operands. */
5614 if (pairwise && rd == rm) {
dd8fbd78 5615 neon_store_scratch(pass, tmp);
9ee6e8bb 5616 } else {
dd8fbd78 5617 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5618 }
5619
5620 } /* for pass */
5621 if (pairwise && rd == rm) {
5622 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5623 tmp = neon_load_scratch(pass);
5624 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5625 }
5626 }
ad69471c 5627 /* End of 3 register same size operations. */
9ee6e8bb
PB
5628 } else if (insn & (1 << 4)) {
5629 if ((insn & 0x00380080) != 0) {
5630 /* Two registers and shift. */
5631 op = (insn >> 8) & 0xf;
5632 if (insn & (1 << 7)) {
cc13115b
PM
5633 /* 64-bit shift. */
5634 if (op > 7) {
5635 return 1;
5636 }
9ee6e8bb
PB
5637 size = 3;
5638 } else {
5639 size = 2;
5640 while ((insn & (1 << (size + 19))) == 0)
5641 size--;
5642 }
5643 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5644 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5645 by immediate using the variable shift operations. */
5646 if (op < 8) {
5647 /* Shift by immediate:
5648 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5649 if (q && ((rd | rm) & 1)) {
5650 return 1;
5651 }
5652 if (!u && (op == 4 || op == 6)) {
5653 return 1;
5654 }
9ee6e8bb
PB
5655 /* Right shifts are encoded as N - shift, where N is the
5656 element size in bits. */
5657 if (op <= 4)
5658 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5659 if (size == 3) {
5660 count = q + 1;
5661 } else {
5662 count = q ? 4: 2;
5663 }
5664 switch (size) {
5665 case 0:
5666 imm = (uint8_t) shift;
5667 imm |= imm << 8;
5668 imm |= imm << 16;
5669 break;
5670 case 1:
5671 imm = (uint16_t) shift;
5672 imm |= imm << 16;
5673 break;
5674 case 2:
5675 case 3:
5676 imm = shift;
5677 break;
5678 default:
5679 abort();
5680 }
5681
5682 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5683 if (size == 3) {
5684 neon_load_reg64(cpu_V0, rm + pass);
5685 tcg_gen_movi_i64(cpu_V1, imm);
5686 switch (op) {
5687 case 0: /* VSHR */
5688 case 1: /* VSRA */
5689 if (u)
5690 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5691 else
ad69471c 5692 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5693 break;
ad69471c
PB
5694 case 2: /* VRSHR */
5695 case 3: /* VRSRA */
5696 if (u)
5697 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5698 else
ad69471c 5699 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5700 break;
ad69471c 5701 case 4: /* VSRI */
ad69471c
PB
5702 case 5: /* VSHL, VSLI */
5703 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5704 break;
0322b26e 5705 case 6: /* VQSHLU */
02da0b2d
PM
5706 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5707 cpu_V0, cpu_V1);
ad69471c 5708 break;
0322b26e
PM
5709 case 7: /* VQSHL */
5710 if (u) {
02da0b2d 5711 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5712 cpu_V0, cpu_V1);
5713 } else {
02da0b2d 5714 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5715 cpu_V0, cpu_V1);
5716 }
9ee6e8bb 5717 break;
9ee6e8bb 5718 }
ad69471c
PB
5719 if (op == 1 || op == 3) {
5720 /* Accumulate. */
5371cb81 5721 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5722 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5723 } else if (op == 4 || (op == 5 && u)) {
5724 /* Insert */
923e6509
CL
5725 neon_load_reg64(cpu_V1, rd + pass);
5726 uint64_t mask;
5727 if (shift < -63 || shift > 63) {
5728 mask = 0;
5729 } else {
5730 if (op == 4) {
5731 mask = 0xffffffffffffffffull >> -shift;
5732 } else {
5733 mask = 0xffffffffffffffffull << shift;
5734 }
5735 }
5736 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5737 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5738 }
5739 neon_store_reg64(cpu_V0, rd + pass);
5740 } else { /* size < 3 */
5741 /* Operands in T0 and T1. */
dd8fbd78 5742 tmp = neon_load_reg(rm, pass);
7d1b0095 5743 tmp2 = tcg_temp_new_i32();
dd8fbd78 5744 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5745 switch (op) {
5746 case 0: /* VSHR */
5747 case 1: /* VSRA */
5748 GEN_NEON_INTEGER_OP(shl);
5749 break;
5750 case 2: /* VRSHR */
5751 case 3: /* VRSRA */
5752 GEN_NEON_INTEGER_OP(rshl);
5753 break;
5754 case 4: /* VSRI */
ad69471c
PB
5755 case 5: /* VSHL, VSLI */
5756 switch (size) {
dd8fbd78
FN
5757 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5758 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5759 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5760 default: abort();
ad69471c
PB
5761 }
5762 break;
0322b26e 5763 case 6: /* VQSHLU */
ad69471c 5764 switch (size) {
0322b26e 5765 case 0:
02da0b2d
PM
5766 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5767 tmp, tmp2);
0322b26e
PM
5768 break;
5769 case 1:
02da0b2d
PM
5770 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5771 tmp, tmp2);
0322b26e
PM
5772 break;
5773 case 2:
02da0b2d
PM
5774 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5775 tmp, tmp2);
0322b26e
PM
5776 break;
5777 default:
cc13115b 5778 abort();
ad69471c
PB
5779 }
5780 break;
0322b26e 5781 case 7: /* VQSHL */
02da0b2d 5782 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5783 break;
ad69471c 5784 }
7d1b0095 5785 tcg_temp_free_i32(tmp2);
ad69471c
PB
5786
5787 if (op == 1 || op == 3) {
5788 /* Accumulate. */
dd8fbd78 5789 tmp2 = neon_load_reg(rd, pass);
5371cb81 5790 gen_neon_add(size, tmp, tmp2);
7d1b0095 5791 tcg_temp_free_i32(tmp2);
ad69471c
PB
5792 } else if (op == 4 || (op == 5 && u)) {
5793 /* Insert */
5794 switch (size) {
5795 case 0:
5796 if (op == 4)
ca9a32e4 5797 mask = 0xff >> -shift;
ad69471c 5798 else
ca9a32e4
JR
5799 mask = (uint8_t)(0xff << shift);
5800 mask |= mask << 8;
5801 mask |= mask << 16;
ad69471c
PB
5802 break;
5803 case 1:
5804 if (op == 4)
ca9a32e4 5805 mask = 0xffff >> -shift;
ad69471c 5806 else
ca9a32e4
JR
5807 mask = (uint16_t)(0xffff << shift);
5808 mask |= mask << 16;
ad69471c
PB
5809 break;
5810 case 2:
ca9a32e4
JR
5811 if (shift < -31 || shift > 31) {
5812 mask = 0;
5813 } else {
5814 if (op == 4)
5815 mask = 0xffffffffu >> -shift;
5816 else
5817 mask = 0xffffffffu << shift;
5818 }
ad69471c
PB
5819 break;
5820 default:
5821 abort();
5822 }
dd8fbd78 5823 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5824 tcg_gen_andi_i32(tmp, tmp, mask);
5825 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5826 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5827 tcg_temp_free_i32(tmp2);
ad69471c 5828 }
dd8fbd78 5829 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5830 }
5831 } /* for pass */
5832 } else if (op < 10) {
ad69471c 5833 /* Shift by immediate and narrow:
9ee6e8bb 5834 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5835 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5836 if (rm & 1) {
5837 return 1;
5838 }
9ee6e8bb
PB
5839 shift = shift - (1 << (size + 3));
5840 size++;
92cdfaeb 5841 if (size == 3) {
a7812ae4 5842 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5843 neon_load_reg64(cpu_V0, rm);
5844 neon_load_reg64(cpu_V1, rm + 1);
5845 for (pass = 0; pass < 2; pass++) {
5846 TCGv_i64 in;
5847 if (pass == 0) {
5848 in = cpu_V0;
5849 } else {
5850 in = cpu_V1;
5851 }
ad69471c 5852 if (q) {
0b36f4cd 5853 if (input_unsigned) {
92cdfaeb 5854 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5855 } else {
92cdfaeb 5856 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5857 }
ad69471c 5858 } else {
0b36f4cd 5859 if (input_unsigned) {
92cdfaeb 5860 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5861 } else {
92cdfaeb 5862 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5863 }
ad69471c 5864 }
7d1b0095 5865 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5866 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5867 neon_store_reg(rd, pass, tmp);
5868 } /* for pass */
5869 tcg_temp_free_i64(tmp64);
5870 } else {
5871 if (size == 1) {
5872 imm = (uint16_t)shift;
5873 imm |= imm << 16;
2c0262af 5874 } else {
92cdfaeb
PM
5875 /* size == 2 */
5876 imm = (uint32_t)shift;
5877 }
5878 tmp2 = tcg_const_i32(imm);
5879 tmp4 = neon_load_reg(rm + 1, 0);
5880 tmp5 = neon_load_reg(rm + 1, 1);
5881 for (pass = 0; pass < 2; pass++) {
5882 if (pass == 0) {
5883 tmp = neon_load_reg(rm, 0);
5884 } else {
5885 tmp = tmp4;
5886 }
0b36f4cd
CL
5887 gen_neon_shift_narrow(size, tmp, tmp2, q,
5888 input_unsigned);
92cdfaeb
PM
5889 if (pass == 0) {
5890 tmp3 = neon_load_reg(rm, 1);
5891 } else {
5892 tmp3 = tmp5;
5893 }
0b36f4cd
CL
5894 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5895 input_unsigned);
36aa55dc 5896 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5897 tcg_temp_free_i32(tmp);
5898 tcg_temp_free_i32(tmp3);
5899 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5900 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5901 neon_store_reg(rd, pass, tmp);
5902 } /* for pass */
c6067f04 5903 tcg_temp_free_i32(tmp2);
b75263d6 5904 }
9ee6e8bb 5905 } else if (op == 10) {
cc13115b
PM
5906 /* VSHLL, VMOVL */
5907 if (q || (rd & 1)) {
9ee6e8bb 5908 return 1;
cc13115b 5909 }
ad69471c
PB
5910 tmp = neon_load_reg(rm, 0);
5911 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5912 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5913 if (pass == 1)
5914 tmp = tmp2;
5915
5916 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5917
9ee6e8bb
PB
5918 if (shift != 0) {
5919 /* The shift is less than the width of the source
ad69471c
PB
5920 type, so we can just shift the whole register. */
5921 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5922 /* Widen the result of shift: we need to clear
5923 * the potential overflow bits resulting from
5924 * left bits of the narrow input appearing as
5925 * right bits of left the neighbour narrow
5926 * input. */
ad69471c
PB
5927 if (size < 2 || !u) {
5928 uint64_t imm64;
5929 if (size == 0) {
5930 imm = (0xffu >> (8 - shift));
5931 imm |= imm << 16;
acdf01ef 5932 } else if (size == 1) {
ad69471c 5933 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5934 } else {
5935 /* size == 2 */
5936 imm = 0xffffffff >> (32 - shift);
5937 }
5938 if (size < 2) {
5939 imm64 = imm | (((uint64_t)imm) << 32);
5940 } else {
5941 imm64 = imm;
9ee6e8bb 5942 }
acdf01ef 5943 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5944 }
5945 }
ad69471c 5946 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5947 }
f73534a5 5948 } else if (op >= 14) {
9ee6e8bb 5949 /* VCVT fixed-point. */
cc13115b
PM
5950 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5951 return 1;
5952 }
f73534a5
PM
5953 /* We have already masked out the must-be-1 top bit of imm6,
5954 * hence this 32-shift where the ARM ARM has 64-imm6.
5955 */
5956 shift = 32 - shift;
9ee6e8bb 5957 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5958 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5959 if (!(op & 1)) {
9ee6e8bb 5960 if (u)
5500b06c 5961 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5962 else
5500b06c 5963 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5964 } else {
5965 if (u)
5500b06c 5966 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5967 else
5500b06c 5968 gen_vfp_tosl(0, shift, 1);
2c0262af 5969 }
4373f3ce 5970 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5971 }
5972 } else {
9ee6e8bb
PB
5973 return 1;
5974 }
5975 } else { /* (insn & 0x00380080) == 0 */
5976 int invert;
7d80fee5
PM
5977 if (q && (rd & 1)) {
5978 return 1;
5979 }
9ee6e8bb
PB
5980
5981 op = (insn >> 8) & 0xf;
5982 /* One register and immediate. */
5983 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5984 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5985 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5986 * We choose to not special-case this and will behave as if a
5987 * valid constant encoding of 0 had been given.
5988 */
9ee6e8bb
PB
5989 switch (op) {
5990 case 0: case 1:
5991 /* no-op */
5992 break;
5993 case 2: case 3:
5994 imm <<= 8;
5995 break;
5996 case 4: case 5:
5997 imm <<= 16;
5998 break;
5999 case 6: case 7:
6000 imm <<= 24;
6001 break;
6002 case 8: case 9:
6003 imm |= imm << 16;
6004 break;
6005 case 10: case 11:
6006 imm = (imm << 8) | (imm << 24);
6007 break;
6008 case 12:
8e31209e 6009 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6010 break;
6011 case 13:
6012 imm = (imm << 16) | 0xffff;
6013 break;
6014 case 14:
6015 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6016 if (invert)
6017 imm = ~imm;
6018 break;
6019 case 15:
7d80fee5
PM
6020 if (invert) {
6021 return 1;
6022 }
9ee6e8bb
PB
6023 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6024 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6025 break;
6026 }
6027 if (invert)
6028 imm = ~imm;
6029
9ee6e8bb
PB
6030 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6031 if (op & 1 && op < 12) {
ad69471c 6032 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6033 if (invert) {
6034 /* The immediate value has already been inverted, so
6035 BIC becomes AND. */
ad69471c 6036 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6037 } else {
ad69471c 6038 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6039 }
9ee6e8bb 6040 } else {
ad69471c 6041 /* VMOV, VMVN. */
7d1b0095 6042 tmp = tcg_temp_new_i32();
9ee6e8bb 6043 if (op == 14 && invert) {
a5a14945 6044 int n;
ad69471c
PB
6045 uint32_t val;
6046 val = 0;
9ee6e8bb
PB
6047 for (n = 0; n < 4; n++) {
6048 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6049 val |= 0xff << (n * 8);
9ee6e8bb 6050 }
ad69471c
PB
6051 tcg_gen_movi_i32(tmp, val);
6052 } else {
6053 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6054 }
9ee6e8bb 6055 }
ad69471c 6056 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6057 }
6058 }
e4b3861d 6059 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6060 if (size != 3) {
6061 op = (insn >> 8) & 0xf;
6062 if ((insn & (1 << 6)) == 0) {
6063 /* Three registers of different lengths. */
6064 int src1_wide;
6065 int src2_wide;
6066 int prewiden;
526d0096
PM
6067 /* undefreq: bit 0 : UNDEF if size == 0
6068 * bit 1 : UNDEF if size == 1
6069 * bit 2 : UNDEF if size == 2
6070 * bit 3 : UNDEF if U == 1
6071 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6072 */
6073 int undefreq;
6074 /* prewiden, src1_wide, src2_wide, undefreq */
6075 static const int neon_3reg_wide[16][4] = {
6076 {1, 0, 0, 0}, /* VADDL */
6077 {1, 1, 0, 0}, /* VADDW */
6078 {1, 0, 0, 0}, /* VSUBL */
6079 {1, 1, 0, 0}, /* VSUBW */
6080 {0, 1, 1, 0}, /* VADDHN */
6081 {0, 0, 0, 0}, /* VABAL */
6082 {0, 1, 1, 0}, /* VSUBHN */
6083 {0, 0, 0, 0}, /* VABDL */
6084 {0, 0, 0, 0}, /* VMLAL */
526d0096 6085 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6086 {0, 0, 0, 0}, /* VMLSL */
526d0096 6087 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6088 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6089 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6090 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6091 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6092 };
6093
6094 prewiden = neon_3reg_wide[op][0];
6095 src1_wide = neon_3reg_wide[op][1];
6096 src2_wide = neon_3reg_wide[op][2];
695272dc 6097 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6098
526d0096
PM
6099 if ((undefreq & (1 << size)) ||
6100 ((undefreq & 8) && u)) {
695272dc
PM
6101 return 1;
6102 }
6103 if ((src1_wide && (rn & 1)) ||
6104 (src2_wide && (rm & 1)) ||
6105 (!src2_wide && (rd & 1))) {
ad69471c 6106 return 1;
695272dc 6107 }
ad69471c 6108
4e624eda
PM
6109 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6110 * outside the loop below as it only performs a single pass.
6111 */
6112 if (op == 14 && size == 2) {
6113 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6114
d614a513 6115 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6116 return 1;
6117 }
6118 tcg_rn = tcg_temp_new_i64();
6119 tcg_rm = tcg_temp_new_i64();
6120 tcg_rd = tcg_temp_new_i64();
6121 neon_load_reg64(tcg_rn, rn);
6122 neon_load_reg64(tcg_rm, rm);
6123 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6124 neon_store_reg64(tcg_rd, rd);
6125 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6126 neon_store_reg64(tcg_rd, rd + 1);
6127 tcg_temp_free_i64(tcg_rn);
6128 tcg_temp_free_i64(tcg_rm);
6129 tcg_temp_free_i64(tcg_rd);
6130 return 0;
6131 }
6132
9ee6e8bb
PB
6133 /* Avoid overlapping operands. Wide source operands are
6134 always aligned so will never overlap with wide
6135 destinations in problematic ways. */
8f8e3aa4 6136 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6137 tmp = neon_load_reg(rm, 1);
6138 neon_store_scratch(2, tmp);
8f8e3aa4 6139 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6140 tmp = neon_load_reg(rn, 1);
6141 neon_store_scratch(2, tmp);
9ee6e8bb 6142 }
39d5492a 6143 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6144 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6145 if (src1_wide) {
6146 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6147 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6148 } else {
ad69471c 6149 if (pass == 1 && rd == rn) {
dd8fbd78 6150 tmp = neon_load_scratch(2);
9ee6e8bb 6151 } else {
ad69471c
PB
6152 tmp = neon_load_reg(rn, pass);
6153 }
6154 if (prewiden) {
6155 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6156 }
6157 }
ad69471c
PB
6158 if (src2_wide) {
6159 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6160 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6161 } else {
ad69471c 6162 if (pass == 1 && rd == rm) {
dd8fbd78 6163 tmp2 = neon_load_scratch(2);
9ee6e8bb 6164 } else {
ad69471c
PB
6165 tmp2 = neon_load_reg(rm, pass);
6166 }
6167 if (prewiden) {
6168 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6169 }
9ee6e8bb
PB
6170 }
6171 switch (op) {
6172 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6173 gen_neon_addl(size);
9ee6e8bb 6174 break;
79b0e534 6175 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6176 gen_neon_subl(size);
9ee6e8bb
PB
6177 break;
6178 case 5: case 7: /* VABAL, VABDL */
6179 switch ((size << 1) | u) {
ad69471c
PB
6180 case 0:
6181 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6182 break;
6183 case 1:
6184 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6185 break;
6186 case 2:
6187 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6188 break;
6189 case 3:
6190 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6191 break;
6192 case 4:
6193 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6194 break;
6195 case 5:
6196 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6197 break;
9ee6e8bb
PB
6198 default: abort();
6199 }
7d1b0095
PM
6200 tcg_temp_free_i32(tmp2);
6201 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6202 break;
6203 case 8: case 9: case 10: case 11: case 12: case 13:
6204 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6205 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6206 break;
6207 case 14: /* Polynomial VMULL */
e5ca24cb 6208 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6209 tcg_temp_free_i32(tmp2);
6210 tcg_temp_free_i32(tmp);
e5ca24cb 6211 break;
695272dc
PM
6212 default: /* 15 is RESERVED: caught earlier */
6213 abort();
9ee6e8bb 6214 }
ebcd88ce
PM
6215 if (op == 13) {
6216 /* VQDMULL */
6217 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6218 neon_store_reg64(cpu_V0, rd + pass);
6219 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6220 /* Accumulate. */
ebcd88ce 6221 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6222 switch (op) {
4dc064e6
PM
6223 case 10: /* VMLSL */
6224 gen_neon_negl(cpu_V0, size);
6225 /* Fall through */
6226 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6227 gen_neon_addl(size);
9ee6e8bb
PB
6228 break;
6229 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6230 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6231 if (op == 11) {
6232 gen_neon_negl(cpu_V0, size);
6233 }
ad69471c
PB
6234 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6235 break;
9ee6e8bb
PB
6236 default:
6237 abort();
6238 }
ad69471c 6239 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6240 } else if (op == 4 || op == 6) {
6241 /* Narrowing operation. */
7d1b0095 6242 tmp = tcg_temp_new_i32();
79b0e534 6243 if (!u) {
9ee6e8bb 6244 switch (size) {
ad69471c
PB
6245 case 0:
6246 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6247 break;
6248 case 1:
6249 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6250 break;
6251 case 2:
6252 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6253 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6254 break;
9ee6e8bb
PB
6255 default: abort();
6256 }
6257 } else {
6258 switch (size) {
ad69471c
PB
6259 case 0:
6260 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6261 break;
6262 case 1:
6263 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6264 break;
6265 case 2:
6266 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6267 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6268 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6269 break;
9ee6e8bb
PB
6270 default: abort();
6271 }
6272 }
ad69471c
PB
6273 if (pass == 0) {
6274 tmp3 = tmp;
6275 } else {
6276 neon_store_reg(rd, 0, tmp3);
6277 neon_store_reg(rd, 1, tmp);
6278 }
9ee6e8bb
PB
6279 } else {
6280 /* Write back the result. */
ad69471c 6281 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6282 }
6283 }
6284 } else {
3e3326df
PM
6285 /* Two registers and a scalar. NB that for ops of this form
6286 * the ARM ARM labels bit 24 as Q, but it is in our variable
6287 * 'u', not 'q'.
6288 */
6289 if (size == 0) {
6290 return 1;
6291 }
9ee6e8bb 6292 switch (op) {
9ee6e8bb 6293 case 1: /* Float VMLA scalar */
9ee6e8bb 6294 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6295 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6296 if (size == 1) {
6297 return 1;
6298 }
6299 /* fall through */
6300 case 0: /* Integer VMLA scalar */
6301 case 4: /* Integer VMLS scalar */
6302 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6303 case 12: /* VQDMULH scalar */
6304 case 13: /* VQRDMULH scalar */
3e3326df
PM
6305 if (u && ((rd | rn) & 1)) {
6306 return 1;
6307 }
dd8fbd78
FN
6308 tmp = neon_get_scalar(size, rm);
6309 neon_store_scratch(0, tmp);
9ee6e8bb 6310 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6311 tmp = neon_load_scratch(0);
6312 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6313 if (op == 12) {
6314 if (size == 1) {
02da0b2d 6315 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6316 } else {
02da0b2d 6317 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6318 }
6319 } else if (op == 13) {
6320 if (size == 1) {
02da0b2d 6321 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6322 } else {
02da0b2d 6323 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6324 }
6325 } else if (op & 1) {
aa47cfdd
PM
6326 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6327 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6328 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6329 } else {
6330 switch (size) {
dd8fbd78
FN
6331 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6332 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6333 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6334 default: abort();
9ee6e8bb
PB
6335 }
6336 }
7d1b0095 6337 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6338 if (op < 8) {
6339 /* Accumulate. */
dd8fbd78 6340 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6341 switch (op) {
6342 case 0:
dd8fbd78 6343 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6344 break;
6345 case 1:
aa47cfdd
PM
6346 {
6347 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6348 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6349 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6350 break;
aa47cfdd 6351 }
9ee6e8bb 6352 case 4:
dd8fbd78 6353 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6354 break;
6355 case 5:
aa47cfdd
PM
6356 {
6357 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6358 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6359 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6360 break;
aa47cfdd 6361 }
9ee6e8bb
PB
6362 default:
6363 abort();
6364 }
7d1b0095 6365 tcg_temp_free_i32(tmp2);
9ee6e8bb 6366 }
dd8fbd78 6367 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6368 }
6369 break;
9ee6e8bb 6370 case 3: /* VQDMLAL scalar */
9ee6e8bb 6371 case 7: /* VQDMLSL scalar */
9ee6e8bb 6372 case 11: /* VQDMULL scalar */
3e3326df 6373 if (u == 1) {
ad69471c 6374 return 1;
3e3326df
PM
6375 }
6376 /* fall through */
6377                 case 2: /* VMLAL scalar */
6378 case 6: /* VMLSL scalar */
6379 case 10: /* VMULL scalar */
6380 if (rd & 1) {
6381 return 1;
6382 }
dd8fbd78 6383 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6384 /* We need a copy of tmp2 because gen_neon_mull
6385 * deletes it during pass 0. */
7d1b0095 6386 tmp4 = tcg_temp_new_i32();
c6067f04 6387 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6388 tmp3 = neon_load_reg(rn, 1);
ad69471c 6389
9ee6e8bb 6390 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6391 if (pass == 0) {
6392 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6393 } else {
dd8fbd78 6394 tmp = tmp3;
c6067f04 6395 tmp2 = tmp4;
9ee6e8bb 6396 }
ad69471c 6397 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6398 if (op != 11) {
6399 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6400 }
9ee6e8bb 6401 switch (op) {
4dc064e6
PM
6402 case 6:
6403 gen_neon_negl(cpu_V0, size);
6404 /* Fall through */
6405 case 2:
ad69471c 6406 gen_neon_addl(size);
9ee6e8bb
PB
6407 break;
6408 case 3: case 7:
ad69471c 6409 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6410 if (op == 7) {
6411 gen_neon_negl(cpu_V0, size);
6412 }
ad69471c 6413 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6414 break;
6415 case 10:
6416 /* no-op */
6417 break;
6418 case 11:
ad69471c 6419 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6420 break;
6421 default:
6422 abort();
6423 }
ad69471c 6424 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6425 }
dd8fbd78 6426
dd8fbd78 6427
9ee6e8bb
PB
6428 break;
6429 default: /* 14 and 15 are RESERVED */
6430 return 1;
6431 }
6432 }
6433 } else { /* size == 3 */
6434 if (!u) {
6435 /* Extract. */
9ee6e8bb 6436 imm = (insn >> 8) & 0xf;
ad69471c
PB
6437
6438 if (imm > 7 && !q)
6439 return 1;
6440
52579ea1
PM
6441 if (q && ((rd | rn | rm) & 1)) {
6442 return 1;
6443 }
6444
ad69471c
PB
6445 if (imm == 0) {
6446 neon_load_reg64(cpu_V0, rn);
6447 if (q) {
6448 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6449 }
ad69471c
PB
6450 } else if (imm == 8) {
6451 neon_load_reg64(cpu_V0, rn + 1);
6452 if (q) {
6453 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6454 }
ad69471c 6455 } else if (q) {
a7812ae4 6456 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6457 if (imm < 8) {
6458 neon_load_reg64(cpu_V0, rn);
a7812ae4 6459 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6460 } else {
6461 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6462 neon_load_reg64(tmp64, rm);
ad69471c
PB
6463 }
6464 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6465 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6466 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6467 if (imm < 8) {
6468 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6469 } else {
ad69471c
PB
6470 neon_load_reg64(cpu_V1, rm + 1);
6471 imm -= 8;
9ee6e8bb 6472 }
ad69471c 6473 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6474 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6475 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6476 tcg_temp_free_i64(tmp64);
ad69471c 6477 } else {
a7812ae4 6478 /* BUGFIX */
ad69471c 6479 neon_load_reg64(cpu_V0, rn);
a7812ae4 6480 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6481 neon_load_reg64(cpu_V1, rm);
a7812ae4 6482 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6483 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6484 }
6485 neon_store_reg64(cpu_V0, rd);
6486 if (q) {
6487 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6488 }
6489 } else if ((insn & (1 << 11)) == 0) {
6490 /* Two register misc. */
6491 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6492 size = (insn >> 18) & 3;
600b828c
PM
6493 /* UNDEF for unknown op values and bad op-size combinations */
6494 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6495 return 1;
6496 }
fc2a9b37
PM
6497 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6498 q && ((rm | rd) & 1)) {
6499 return 1;
6500 }
9ee6e8bb 6501 switch (op) {
600b828c 6502 case NEON_2RM_VREV64:
9ee6e8bb 6503 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6504 tmp = neon_load_reg(rm, pass * 2);
6505 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6506 switch (size) {
dd8fbd78
FN
6507 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6508 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6509 case 2: /* no-op */ break;
6510 default: abort();
6511 }
dd8fbd78 6512 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6513 if (size == 2) {
dd8fbd78 6514 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6515 } else {
9ee6e8bb 6516 switch (size) {
dd8fbd78
FN
6517 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6518 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6519 default: abort();
6520 }
dd8fbd78 6521 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6522 }
6523 }
6524 break;
600b828c
PM
6525 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6526 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6527 for (pass = 0; pass < q + 1; pass++) {
6528 tmp = neon_load_reg(rm, pass * 2);
6529 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6530 tmp = neon_load_reg(rm, pass * 2 + 1);
6531 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6532 switch (size) {
6533 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6534 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6535 case 2: tcg_gen_add_i64(CPU_V001); break;
6536 default: abort();
6537 }
600b828c 6538 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6539 /* Accumulate. */
ad69471c
PB
6540 neon_load_reg64(cpu_V1, rd + pass);
6541 gen_neon_addl(size);
9ee6e8bb 6542 }
ad69471c 6543 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6544 }
6545 break;
600b828c 6546 case NEON_2RM_VTRN:
9ee6e8bb 6547 if (size == 2) {
a5a14945 6548 int n;
9ee6e8bb 6549 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6550 tmp = neon_load_reg(rm, n);
6551 tmp2 = neon_load_reg(rd, n + 1);
6552 neon_store_reg(rm, n, tmp2);
6553 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6554 }
6555 } else {
6556 goto elementwise;
6557 }
6558 break;
600b828c 6559 case NEON_2RM_VUZP:
02acedf9 6560 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6561 return 1;
9ee6e8bb
PB
6562 }
6563 break;
600b828c 6564 case NEON_2RM_VZIP:
d68a6f3a 6565 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6566 return 1;
9ee6e8bb
PB
6567 }
6568 break;
600b828c
PM
6569 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6570 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6571 if (rm & 1) {
6572 return 1;
6573 }
39d5492a 6574 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6575 for (pass = 0; pass < 2; pass++) {
ad69471c 6576 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6577 tmp = tcg_temp_new_i32();
600b828c
PM
6578 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6579 tmp, cpu_V0);
ad69471c
PB
6580 if (pass == 0) {
6581 tmp2 = tmp;
6582 } else {
6583 neon_store_reg(rd, 0, tmp2);
6584 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6585 }
9ee6e8bb
PB
6586 }
6587 break;
600b828c 6588 case NEON_2RM_VSHLL:
fc2a9b37 6589 if (q || (rd & 1)) {
9ee6e8bb 6590 return 1;
600b828c 6591 }
ad69471c
PB
6592 tmp = neon_load_reg(rm, 0);
6593 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6594 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6595 if (pass == 1)
6596 tmp = tmp2;
6597 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6598 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6599 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6600 }
6601 break;
600b828c 6602 case NEON_2RM_VCVT_F16_F32:
d614a513 6603 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6604 q || (rm & 1)) {
6605 return 1;
6606 }
7d1b0095
PM
6607 tmp = tcg_temp_new_i32();
6608 tmp2 = tcg_temp_new_i32();
60011498 6609 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6610 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6611 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6612 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6613 tcg_gen_shli_i32(tmp2, tmp2, 16);
6614 tcg_gen_or_i32(tmp2, tmp2, tmp);
6615 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6616 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6617 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6618 neon_store_reg(rd, 0, tmp2);
7d1b0095 6619 tmp2 = tcg_temp_new_i32();
2d981da7 6620 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6621 tcg_gen_shli_i32(tmp2, tmp2, 16);
6622 tcg_gen_or_i32(tmp2, tmp2, tmp);
6623 neon_store_reg(rd, 1, tmp2);
7d1b0095 6624 tcg_temp_free_i32(tmp);
60011498 6625 break;
600b828c 6626 case NEON_2RM_VCVT_F32_F16:
d614a513 6627 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6628 q || (rd & 1)) {
6629 return 1;
6630 }
7d1b0095 6631 tmp3 = tcg_temp_new_i32();
60011498
PB
6632 tmp = neon_load_reg(rm, 0);
6633 tmp2 = neon_load_reg(rm, 1);
6634 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6635 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6636 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6637 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6638 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6639 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6640 tcg_temp_free_i32(tmp);
60011498 6641 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6642 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6643 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6644 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6645 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6646 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6647 tcg_temp_free_i32(tmp2);
6648 tcg_temp_free_i32(tmp3);
60011498 6649 break;
9d935509 6650 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6651 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6652 || ((rm | rd) & 1)) {
6653 return 1;
6654 }
6655 tmp = tcg_const_i32(rd);
6656 tmp2 = tcg_const_i32(rm);
6657
6658 /* Bit 6 is the lowest opcode bit; it distinguishes between
6659 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6660 */
6661 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6662
6663 if (op == NEON_2RM_AESE) {
6664 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6665 } else {
6666 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6667 }
6668 tcg_temp_free_i32(tmp);
6669 tcg_temp_free_i32(tmp2);
6670 tcg_temp_free_i32(tmp3);
6671 break;
f1ecb913 6672 case NEON_2RM_SHA1H:
d614a513 6673 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6674 || ((rm | rd) & 1)) {
6675 return 1;
6676 }
6677 tmp = tcg_const_i32(rd);
6678 tmp2 = tcg_const_i32(rm);
6679
6680 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6681
6682 tcg_temp_free_i32(tmp);
6683 tcg_temp_free_i32(tmp2);
6684 break;
6685 case NEON_2RM_SHA1SU1:
6686 if ((rm | rd) & 1) {
6687 return 1;
6688 }
6689 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6690 if (q) {
d614a513 6691 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6692 return 1;
6693 }
d614a513 6694 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6695 return 1;
6696 }
6697 tmp = tcg_const_i32(rd);
6698 tmp2 = tcg_const_i32(rm);
6699 if (q) {
6700 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6701 } else {
6702 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6703 }
6704 tcg_temp_free_i32(tmp);
6705 tcg_temp_free_i32(tmp2);
6706 break;
9ee6e8bb
PB
6707 default:
6708 elementwise:
6709 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6710 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6711 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6712 neon_reg_offset(rm, pass));
39d5492a 6713 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6714 } else {
dd8fbd78 6715 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6716 }
6717 switch (op) {
600b828c 6718 case NEON_2RM_VREV32:
9ee6e8bb 6719 switch (size) {
dd8fbd78
FN
6720 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6721 case 1: gen_swap_half(tmp); break;
600b828c 6722 default: abort();
9ee6e8bb
PB
6723 }
6724 break;
600b828c 6725 case NEON_2RM_VREV16:
dd8fbd78 6726 gen_rev16(tmp);
9ee6e8bb 6727 break;
600b828c 6728 case NEON_2RM_VCLS:
9ee6e8bb 6729 switch (size) {
dd8fbd78
FN
6730 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6731 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6732 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6733 default: abort();
9ee6e8bb
PB
6734 }
6735 break;
600b828c 6736 case NEON_2RM_VCLZ:
9ee6e8bb 6737 switch (size) {
dd8fbd78
FN
6738 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6739 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6740 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6741 default: abort();
9ee6e8bb
PB
6742 }
6743 break;
600b828c 6744 case NEON_2RM_VCNT:
dd8fbd78 6745 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6746 break;
600b828c 6747 case NEON_2RM_VMVN:
dd8fbd78 6748 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6749 break;
600b828c 6750 case NEON_2RM_VQABS:
9ee6e8bb 6751 switch (size) {
02da0b2d
PM
6752 case 0:
6753 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6754 break;
6755 case 1:
6756 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6757 break;
6758 case 2:
6759 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6760 break;
600b828c 6761 default: abort();
9ee6e8bb
PB
6762 }
6763 break;
600b828c 6764 case NEON_2RM_VQNEG:
9ee6e8bb 6765 switch (size) {
02da0b2d
PM
6766 case 0:
6767 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6768 break;
6769 case 1:
6770 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6771 break;
6772 case 2:
6773 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6774 break;
600b828c 6775 default: abort();
9ee6e8bb
PB
6776 }
6777 break;
600b828c 6778 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6779 tmp2 = tcg_const_i32(0);
9ee6e8bb 6780 switch(size) {
dd8fbd78
FN
6781 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6782 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6783 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6784 default: abort();
9ee6e8bb 6785 }
39d5492a 6786 tcg_temp_free_i32(tmp2);
600b828c 6787 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6788 tcg_gen_not_i32(tmp, tmp);
600b828c 6789 }
9ee6e8bb 6790 break;
600b828c 6791 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6792 tmp2 = tcg_const_i32(0);
9ee6e8bb 6793 switch(size) {
dd8fbd78
FN
6794 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6795 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6796 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6797 default: abort();
9ee6e8bb 6798 }
39d5492a 6799 tcg_temp_free_i32(tmp2);
600b828c 6800 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6801 tcg_gen_not_i32(tmp, tmp);
600b828c 6802 }
9ee6e8bb 6803 break;
600b828c 6804 case NEON_2RM_VCEQ0:
dd8fbd78 6805 tmp2 = tcg_const_i32(0);
9ee6e8bb 6806 switch(size) {
dd8fbd78
FN
6807 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6808 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6809 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6810 default: abort();
9ee6e8bb 6811 }
39d5492a 6812 tcg_temp_free_i32(tmp2);
9ee6e8bb 6813 break;
600b828c 6814 case NEON_2RM_VABS:
9ee6e8bb 6815 switch(size) {
dd8fbd78
FN
6816 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6817 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6818 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6819 default: abort();
9ee6e8bb
PB
6820 }
6821 break;
600b828c 6822 case NEON_2RM_VNEG:
dd8fbd78
FN
6823 tmp2 = tcg_const_i32(0);
6824 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6825 tcg_temp_free_i32(tmp2);
9ee6e8bb 6826 break;
600b828c 6827 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6828 {
6829 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6830 tmp2 = tcg_const_i32(0);
aa47cfdd 6831 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6832 tcg_temp_free_i32(tmp2);
aa47cfdd 6833 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6834 break;
aa47cfdd 6835 }
600b828c 6836 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6837 {
6838 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6839 tmp2 = tcg_const_i32(0);
aa47cfdd 6840 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6841 tcg_temp_free_i32(tmp2);
aa47cfdd 6842 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6843 break;
aa47cfdd 6844 }
600b828c 6845 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6846 {
6847 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6848 tmp2 = tcg_const_i32(0);
aa47cfdd 6849 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6850 tcg_temp_free_i32(tmp2);
aa47cfdd 6851 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6852 break;
aa47cfdd 6853 }
600b828c 6854 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6855 {
6856 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6857 tmp2 = tcg_const_i32(0);
aa47cfdd 6858 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6859 tcg_temp_free_i32(tmp2);
aa47cfdd 6860 tcg_temp_free_ptr(fpstatus);
0e326109 6861 break;
aa47cfdd 6862 }
600b828c 6863 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6864 {
6865 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6866 tmp2 = tcg_const_i32(0);
aa47cfdd 6867 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6868 tcg_temp_free_i32(tmp2);
aa47cfdd 6869 tcg_temp_free_ptr(fpstatus);
0e326109 6870 break;
aa47cfdd 6871 }
600b828c 6872 case NEON_2RM_VABS_F:
4373f3ce 6873 gen_vfp_abs(0);
9ee6e8bb 6874 break;
600b828c 6875 case NEON_2RM_VNEG_F:
4373f3ce 6876 gen_vfp_neg(0);
9ee6e8bb 6877 break;
600b828c 6878 case NEON_2RM_VSWP:
dd8fbd78
FN
6879 tmp2 = neon_load_reg(rd, pass);
6880 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6881 break;
600b828c 6882 case NEON_2RM_VTRN:
dd8fbd78 6883 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6884 switch (size) {
dd8fbd78
FN
6885 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6886 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6887 default: abort();
9ee6e8bb 6888 }
dd8fbd78 6889 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6890 break;
34f7b0a2
WN
6891 case NEON_2RM_VRINTN:
6892 case NEON_2RM_VRINTA:
6893 case NEON_2RM_VRINTM:
6894 case NEON_2RM_VRINTP:
6895 case NEON_2RM_VRINTZ:
6896 {
6897 TCGv_i32 tcg_rmode;
6898 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6899 int rmode;
6900
6901 if (op == NEON_2RM_VRINTZ) {
6902 rmode = FPROUNDING_ZERO;
6903 } else {
6904 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6905 }
6906
6907 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6908 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6909 cpu_env);
6910 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6911 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6912 cpu_env);
6913 tcg_temp_free_ptr(fpstatus);
6914 tcg_temp_free_i32(tcg_rmode);
6915 break;
6916 }
2ce70625
WN
6917 case NEON_2RM_VRINTX:
6918 {
6919 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6920 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6921 tcg_temp_free_ptr(fpstatus);
6922 break;
6923 }
901ad525
WN
6924 case NEON_2RM_VCVTAU:
6925 case NEON_2RM_VCVTAS:
6926 case NEON_2RM_VCVTNU:
6927 case NEON_2RM_VCVTNS:
6928 case NEON_2RM_VCVTPU:
6929 case NEON_2RM_VCVTPS:
6930 case NEON_2RM_VCVTMU:
6931 case NEON_2RM_VCVTMS:
6932 {
6933 bool is_signed = !extract32(insn, 7, 1);
6934 TCGv_ptr fpst = get_fpstatus_ptr(1);
6935 TCGv_i32 tcg_rmode, tcg_shift;
6936 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6937
6938 tcg_shift = tcg_const_i32(0);
6939 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6940 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6941 cpu_env);
6942
6943 if (is_signed) {
6944 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6945 tcg_shift, fpst);
6946 } else {
6947 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6948 tcg_shift, fpst);
6949 }
6950
6951 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6952 cpu_env);
6953 tcg_temp_free_i32(tcg_rmode);
6954 tcg_temp_free_i32(tcg_shift);
6955 tcg_temp_free_ptr(fpst);
6956 break;
6957 }
600b828c 6958 case NEON_2RM_VRECPE:
b6d4443a
AB
6959 {
6960 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6961 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6962 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6963 break;
b6d4443a 6964 }
600b828c 6965 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6966 {
6967 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6968 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6969 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6970 break;
c2fb418e 6971 }
600b828c 6972 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6973 {
6974 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6975 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6976 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6977 break;
b6d4443a 6978 }
600b828c 6979 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6980 {
6981 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6982 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6983 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6984 break;
c2fb418e 6985 }
600b828c 6986 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6987 gen_vfp_sito(0, 1);
9ee6e8bb 6988 break;
600b828c 6989 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6990 gen_vfp_uito(0, 1);
9ee6e8bb 6991 break;
600b828c 6992 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6993 gen_vfp_tosiz(0, 1);
9ee6e8bb 6994 break;
600b828c 6995 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6996 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6997 break;
6998 default:
600b828c
PM
6999 /* Reserved op values were caught by the
7000 * neon_2rm_sizes[] check earlier.
7001 */
7002 abort();
9ee6e8bb 7003 }
600b828c 7004 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7005 tcg_gen_st_f32(cpu_F0s, cpu_env,
7006 neon_reg_offset(rd, pass));
9ee6e8bb 7007 } else {
dd8fbd78 7008 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7009 }
7010 }
7011 break;
7012 }
7013 } else if ((insn & (1 << 10)) == 0) {
7014 /* VTBL, VTBX. */
56907d77
PM
7015 int n = ((insn >> 8) & 3) + 1;
7016 if ((rn + n) > 32) {
7017 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7018 * helper function running off the end of the register file.
7019 */
7020 return 1;
7021 }
7022 n <<= 3;
9ee6e8bb 7023 if (insn & (1 << 6)) {
8f8e3aa4 7024 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7025 } else {
7d1b0095 7026 tmp = tcg_temp_new_i32();
8f8e3aa4 7027 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7028 }
8f8e3aa4 7029 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7030 tmp4 = tcg_const_i32(rn);
7031 tmp5 = tcg_const_i32(n);
9ef39277 7032 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7033 tcg_temp_free_i32(tmp);
9ee6e8bb 7034 if (insn & (1 << 6)) {
8f8e3aa4 7035 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7036 } else {
7d1b0095 7037 tmp = tcg_temp_new_i32();
8f8e3aa4 7038 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7039 }
8f8e3aa4 7040 tmp3 = neon_load_reg(rm, 1);
9ef39277 7041 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7042 tcg_temp_free_i32(tmp5);
7043 tcg_temp_free_i32(tmp4);
8f8e3aa4 7044 neon_store_reg(rd, 0, tmp2);
3018f259 7045 neon_store_reg(rd, 1, tmp3);
7d1b0095 7046 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7047 } else if ((insn & 0x380) == 0) {
7048 /* VDUP */
133da6aa
JR
7049 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7050 return 1;
7051 }
9ee6e8bb 7052 if (insn & (1 << 19)) {
dd8fbd78 7053 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7054 } else {
dd8fbd78 7055 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7056 }
7057 if (insn & (1 << 16)) {
dd8fbd78 7058 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7059 } else if (insn & (1 << 17)) {
7060 if ((insn >> 18) & 1)
dd8fbd78 7061 gen_neon_dup_high16(tmp);
9ee6e8bb 7062 else
dd8fbd78 7063 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7064 }
7065 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7066 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7067 tcg_gen_mov_i32(tmp2, tmp);
7068 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7069 }
7d1b0095 7070 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7071 } else {
7072 return 1;
7073 }
7074 }
7075 }
7076 return 0;
7077}
7078
/* Decode a coprocessor instruction: MRC/MCR (32-bit) and MRRC/MCRR
 * (64-bit) register transfers, routed through the ARMCPRegInfo table.
 * Returns 0 if the access was handled, nonzero if the instruction
 * should UNDEF (unknown register, failed permission check, or CDP,
 * which is not handled here).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* cp0/cp1 access is gated per-coprocessor by the CPAR register */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-coordinate fields; the encodings differ
     * between the 64-bit (MRRC/MCRR) and 32-bit (MRC/MCR) forms.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions (static, translate-time check) */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* The helper may raise an exception, so the PC must be in sync */
            gen_set_pc_im(s, s->pc);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low word to rt, high word to rt2.
                 * store_reg() consumes its temp, so a fresh one is needed
                 * for the second half.
                 */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7326
5e3f878a
PB
7327
7328/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7329static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7330{
39d5492a 7331 TCGv_i32 tmp;
7d1b0095 7332 tmp = tcg_temp_new_i32();
5e3f878a
PB
7333 tcg_gen_trunc_i64_i32(tmp, val);
7334 store_reg(s, rlow, tmp);
7d1b0095 7335 tmp = tcg_temp_new_i32();
5e3f878a
PB
7336 tcg_gen_shri_i64(val, val, 32);
7337 tcg_gen_trunc_i64_i32(tmp, val);
7338 store_reg(s, rhigh, tmp);
7339}
7340
7341/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7342static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7343{
a7812ae4 7344 TCGv_i64 tmp;
39d5492a 7345 TCGv_i32 tmp2;
5e3f878a 7346
36aa55dc 7347 /* Load value and extend to 64 bits. */
a7812ae4 7348 tmp = tcg_temp_new_i64();
5e3f878a
PB
7349 tmp2 = load_reg(s, rlow);
7350 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7351 tcg_temp_free_i32(tmp2);
5e3f878a 7352 tcg_gen_add_i64(val, val, tmp);
b75263d6 7353 tcg_temp_free_i64(tmp);
5e3f878a
PB
7354}
7355
7356/* load and add a 64-bit value from a register pair. */
a7812ae4 7357static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7358{
a7812ae4 7359 TCGv_i64 tmp;
39d5492a
PM
7360 TCGv_i32 tmpl;
7361 TCGv_i32 tmph;
5e3f878a
PB
7362
7363 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7364 tmpl = load_reg(s, rlow);
7365 tmph = load_reg(s, rhigh);
a7812ae4 7366 tmp = tcg_temp_new_i64();
36aa55dc 7367 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7368 tcg_temp_free_i32(tmpl);
7369 tcg_temp_free_i32(tmph);
5e3f878a 7370 tcg_gen_add_i64(val, val, tmp);
b75263d6 7371 tcg_temp_free_i64(tmp);
5e3f878a
PB
7372}
7373
c9f10124 7374/* Set N and Z flags from hi|lo. */
39d5492a 7375static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7376{
c9f10124
RH
7377 tcg_gen_mov_i32(cpu_NF, hi);
7378 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7379}
7380
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */

/* Generate code for LDREX/LDREXB/LDREXH/LDREXD: load the value at
 * @addr into rt (and the following word into rt2 for size 3), and
 * latch the address and loaded data in cpu_exclusive_addr /
 * cpu_exclusive_val for the matching store-exclusive.
 * @size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword pair.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    s->is_ldex = true;

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Second word of the pair lives at addr + 4 */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        /* Record the full 64-bit value for the store-exclusive check */
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7428
/* CLREX: clear the local exclusive monitor by invalidating the
 * recorded exclusive address (-1 never matches a real address).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7433
7434#ifdef CONFIG_USER_ONLY
/* User-mode STREX: we cannot implement the atomic sequence inline,
 * so record the operands in cpu_exclusive_test/cpu_exclusive_info
 * and raise EXCP_STREX for the usermode exception handler to
 * perform the store-exclusive.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers into one word */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
7443#else
/* System-mode STREX: store rt (and rt2 for size 3) to @addr and set
 * rd to 0 on success, 1 on failure.  The store succeeds only if the
 * exclusive monitor still covers @addr and the memory still holds the
 * value recorded by the matching load-exclusive.
 * @size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword pair.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: does the monitor still cover this address? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load the memory and compare against the value
     * recorded at load-exclusive time.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s) */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* A store-exclusive always clears the monitor, pass or fail */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7527#endif
7528
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: store LR and
 * SPSR to the banked SP of @mode, optionally writing back the updated
 * SP to that banked register.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* Fetch the banked SP for the target mode */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from SP to the first (lower-addressed) stored word */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up */
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Compute the final SP value relative to addr, which currently
         * points at the SPSR slot (original SP + first offset + 4).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
7594
f4df2210 7595static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7596{
f4df2210 7597 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7598 TCGv_i32 tmp;
7599 TCGv_i32 tmp2;
7600 TCGv_i32 tmp3;
7601 TCGv_i32 addr;
a7812ae4 7602 TCGv_i64 tmp64;
9ee6e8bb 7603
9ee6e8bb 7604 /* M variants do not implement ARM mode. */
b53d8923 7605 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7606 goto illegal_op;
b53d8923 7607 }
9ee6e8bb
PB
7608 cond = insn >> 28;
7609 if (cond == 0xf){
be5e7a76
DES
7610 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7611 * choose to UNDEF. In ARMv5 and above the space is used
7612 * for miscellaneous unconditional instructions.
7613 */
7614 ARCH(5);
7615
9ee6e8bb
PB
7616 /* Unconditional instructions. */
7617 if (((insn >> 25) & 7) == 1) {
7618 /* NEON Data processing. */
d614a513 7619 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7620 goto illegal_op;
d614a513 7621 }
9ee6e8bb 7622
7dcc1f89 7623 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7624 goto illegal_op;
7dcc1f89 7625 }
9ee6e8bb
PB
7626 return;
7627 }
7628 if ((insn & 0x0f100000) == 0x04000000) {
7629 /* NEON load/store. */
d614a513 7630 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7631 goto illegal_op;
d614a513 7632 }
9ee6e8bb 7633
7dcc1f89 7634 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7635 goto illegal_op;
7dcc1f89 7636 }
9ee6e8bb
PB
7637 return;
7638 }
6a57f3eb
WN
7639 if ((insn & 0x0f000e10) == 0x0e000a00) {
7640 /* VFP. */
7dcc1f89 7641 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
7642 goto illegal_op;
7643 }
7644 return;
7645 }
3d185e5d
PM
7646 if (((insn & 0x0f30f000) == 0x0510f000) ||
7647 ((insn & 0x0f30f010) == 0x0710f000)) {
7648 if ((insn & (1 << 22)) == 0) {
7649 /* PLDW; v7MP */
d614a513 7650 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7651 goto illegal_op;
7652 }
7653 }
7654 /* Otherwise PLD; v5TE+ */
be5e7a76 7655 ARCH(5TE);
3d185e5d
PM
7656 return;
7657 }
7658 if (((insn & 0x0f70f000) == 0x0450f000) ||
7659 ((insn & 0x0f70f010) == 0x0650f000)) {
7660 ARCH(7);
7661 return; /* PLI; V7 */
7662 }
7663 if (((insn & 0x0f700000) == 0x04100000) ||
7664 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7665 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7666 goto illegal_op;
7667 }
7668 return; /* v7MP: Unallocated memory hint: must NOP */
7669 }
7670
7671 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7672 ARCH(6);
7673 /* setend */
10962fd5
PM
7674 if (((insn >> 9) & 1) != s->bswap_code) {
7675 /* Dynamic endianness switching not implemented. */
e0c270d9 7676 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7677 goto illegal_op;
7678 }
7679 return;
7680 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7681 switch ((insn >> 4) & 0xf) {
7682 case 1: /* clrex */
7683 ARCH(6K);
426f5abc 7684 gen_clrex(s);
9ee6e8bb
PB
7685 return;
7686 case 4: /* dsb */
7687 case 5: /* dmb */
7688 case 6: /* isb */
7689 ARCH(7);
7690 /* We don't emulate caches so these are a no-op. */
7691 return;
7692 default:
7693 goto illegal_op;
7694 }
7695 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7696 /* srs */
81465888 7697 if (IS_USER(s)) {
9ee6e8bb 7698 goto illegal_op;
9ee6e8bb 7699 }
81465888
PM
7700 ARCH(6);
7701 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7702 return;
ea825eee 7703 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7704 /* rfe */
c67b6b71 7705 int32_t offset;
9ee6e8bb
PB
7706 if (IS_USER(s))
7707 goto illegal_op;
7708 ARCH(6);
7709 rn = (insn >> 16) & 0xf;
b0109805 7710 addr = load_reg(s, rn);
9ee6e8bb
PB
7711 i = (insn >> 23) & 3;
7712 switch (i) {
b0109805 7713 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7714 case 1: offset = 0; break; /* IA */
7715 case 2: offset = -8; break; /* DB */
b0109805 7716 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7717 default: abort();
7718 }
7719 if (offset)
b0109805
PB
7720 tcg_gen_addi_i32(addr, addr, offset);
7721 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7722 tmp = tcg_temp_new_i32();
6ce2faf4 7723 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7724 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7725 tmp2 = tcg_temp_new_i32();
6ce2faf4 7726 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7727 if (insn & (1 << 21)) {
7728 /* Base writeback. */
7729 switch (i) {
b0109805 7730 case 0: offset = -8; break;
c67b6b71
FN
7731 case 1: offset = 4; break;
7732 case 2: offset = -4; break;
b0109805 7733 case 3: offset = 0; break;
9ee6e8bb
PB
7734 default: abort();
7735 }
7736 if (offset)
b0109805
PB
7737 tcg_gen_addi_i32(addr, addr, offset);
7738 store_reg(s, rn, addr);
7739 } else {
7d1b0095 7740 tcg_temp_free_i32(addr);
9ee6e8bb 7741 }
b0109805 7742 gen_rfe(s, tmp, tmp2);
c67b6b71 7743 return;
9ee6e8bb
PB
7744 } else if ((insn & 0x0e000000) == 0x0a000000) {
7745 /* branch link and change to thumb (blx <offset>) */
7746 int32_t offset;
7747
7748 val = (uint32_t)s->pc;
7d1b0095 7749 tmp = tcg_temp_new_i32();
d9ba4830
PB
7750 tcg_gen_movi_i32(tmp, val);
7751 store_reg(s, 14, tmp);
9ee6e8bb
PB
7752 /* Sign-extend the 24-bit offset */
7753 offset = (((int32_t)insn) << 8) >> 8;
7754 /* offset * 4 + bit24 * 2 + (thumb bit) */
7755 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7756 /* pipeline offset */
7757 val += 4;
be5e7a76 7758 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7759 gen_bx_im(s, val);
9ee6e8bb
PB
7760 return;
7761 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7762 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7763 /* iWMMXt register transfer. */
c0f4af17 7764 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7765 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7766 return;
c0f4af17
PM
7767 }
7768 }
9ee6e8bb
PB
7769 }
7770 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7771 /* Coprocessor double register transfer. */
be5e7a76 7772 ARCH(5TE);
9ee6e8bb
PB
7773 } else if ((insn & 0x0f000010) == 0x0e000010) {
7774 /* Additional coprocessor register transfer. */
7997d92f 7775 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7776 uint32_t mask;
7777 uint32_t val;
7778 /* cps (privileged) */
7779 if (IS_USER(s))
7780 return;
7781 mask = val = 0;
7782 if (insn & (1 << 19)) {
7783 if (insn & (1 << 8))
7784 mask |= CPSR_A;
7785 if (insn & (1 << 7))
7786 mask |= CPSR_I;
7787 if (insn & (1 << 6))
7788 mask |= CPSR_F;
7789 if (insn & (1 << 18))
7790 val |= mask;
7791 }
7997d92f 7792 if (insn & (1 << 17)) {
9ee6e8bb
PB
7793 mask |= CPSR_M;
7794 val |= (insn & 0x1f);
7795 }
7796 if (mask) {
2fbac54b 7797 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7798 }
7799 return;
7800 }
7801 goto illegal_op;
7802 }
7803 if (cond != 0xe) {
7804 /* if not always execute, we generate a conditional jump to
7805 next instruction */
7806 s->condlabel = gen_new_label();
39fb730a 7807 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7808 s->condjmp = 1;
7809 }
7810 if ((insn & 0x0f900000) == 0x03000000) {
7811 if ((insn & (1 << 21)) == 0) {
7812 ARCH(6T2);
7813 rd = (insn >> 12) & 0xf;
7814 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7815 if ((insn & (1 << 22)) == 0) {
7816 /* MOVW */
7d1b0095 7817 tmp = tcg_temp_new_i32();
5e3f878a 7818 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7819 } else {
7820 /* MOVT */
5e3f878a 7821 tmp = load_reg(s, rd);
86831435 7822 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7823 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7824 }
5e3f878a 7825 store_reg(s, rd, tmp);
9ee6e8bb
PB
7826 } else {
7827 if (((insn >> 12) & 0xf) != 0xf)
7828 goto illegal_op;
7829 if (((insn >> 16) & 0xf) == 0) {
7830 gen_nop_hint(s, insn & 0xff);
7831 } else {
7832 /* CPSR = immediate */
7833 val = insn & 0xff;
7834 shift = ((insn >> 8) & 0xf) * 2;
7835 if (shift)
7836 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7837 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
7838 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7839 i, val)) {
9ee6e8bb 7840 goto illegal_op;
7dcc1f89 7841 }
9ee6e8bb
PB
7842 }
7843 }
7844 } else if ((insn & 0x0f900000) == 0x01000000
7845 && (insn & 0x00000090) != 0x00000090) {
7846 /* miscellaneous instructions */
7847 op1 = (insn >> 21) & 3;
7848 sh = (insn >> 4) & 0xf;
7849 rm = insn & 0xf;
7850 switch (sh) {
7851 case 0x0: /* move program status register */
7852 if (op1 & 1) {
7853 /* PSR = reg */
2fbac54b 7854 tmp = load_reg(s, rm);
9ee6e8bb 7855 i = ((op1 & 2) != 0);
7dcc1f89 7856 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7857 goto illegal_op;
7858 } else {
7859 /* reg = PSR */
7860 rd = (insn >> 12) & 0xf;
7861 if (op1 & 2) {
7862 if (IS_USER(s))
7863 goto illegal_op;
d9ba4830 7864 tmp = load_cpu_field(spsr);
9ee6e8bb 7865 } else {
7d1b0095 7866 tmp = tcg_temp_new_i32();
9ef39277 7867 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7868 }
d9ba4830 7869 store_reg(s, rd, tmp);
9ee6e8bb
PB
7870 }
7871 break;
7872 case 0x1:
7873 if (op1 == 1) {
7874 /* branch/exchange thumb (bx). */
be5e7a76 7875 ARCH(4T);
d9ba4830
PB
7876 tmp = load_reg(s, rm);
7877 gen_bx(s, tmp);
9ee6e8bb
PB
7878 } else if (op1 == 3) {
7879 /* clz */
be5e7a76 7880 ARCH(5);
9ee6e8bb 7881 rd = (insn >> 12) & 0xf;
1497c961
PB
7882 tmp = load_reg(s, rm);
7883 gen_helper_clz(tmp, tmp);
7884 store_reg(s, rd, tmp);
9ee6e8bb
PB
7885 } else {
7886 goto illegal_op;
7887 }
7888 break;
7889 case 0x2:
7890 if (op1 == 1) {
7891 ARCH(5J); /* bxj */
7892 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7893 tmp = load_reg(s, rm);
7894 gen_bx(s, tmp);
9ee6e8bb
PB
7895 } else {
7896 goto illegal_op;
7897 }
7898 break;
7899 case 0x3:
7900 if (op1 != 1)
7901 goto illegal_op;
7902
be5e7a76 7903 ARCH(5);
9ee6e8bb 7904 /* branch link/exchange thumb (blx) */
d9ba4830 7905 tmp = load_reg(s, rm);
7d1b0095 7906 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7907 tcg_gen_movi_i32(tmp2, s->pc);
7908 store_reg(s, 14, tmp2);
7909 gen_bx(s, tmp);
9ee6e8bb 7910 break;
eb0ecd5a
WN
7911 case 0x4:
7912 {
7913 /* crc32/crc32c */
7914 uint32_t c = extract32(insn, 8, 4);
7915
7916 /* Check this CPU supports ARMv8 CRC instructions.
7917 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7918 * Bits 8, 10 and 11 should be zero.
7919 */
d614a513 7920 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
7921 (c & 0xd) != 0) {
7922 goto illegal_op;
7923 }
7924
7925 rn = extract32(insn, 16, 4);
7926 rd = extract32(insn, 12, 4);
7927
7928 tmp = load_reg(s, rn);
7929 tmp2 = load_reg(s, rm);
aa633469
PM
7930 if (op1 == 0) {
7931 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7932 } else if (op1 == 1) {
7933 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7934 }
eb0ecd5a
WN
7935 tmp3 = tcg_const_i32(1 << op1);
7936 if (c & 0x2) {
7937 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7938 } else {
7939 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7940 }
7941 tcg_temp_free_i32(tmp2);
7942 tcg_temp_free_i32(tmp3);
7943 store_reg(s, rd, tmp);
7944 break;
7945 }
9ee6e8bb 7946 case 0x5: /* saturating add/subtract */
be5e7a76 7947 ARCH(5TE);
9ee6e8bb
PB
7948 rd = (insn >> 12) & 0xf;
7949 rn = (insn >> 16) & 0xf;
b40d0353 7950 tmp = load_reg(s, rm);
5e3f878a 7951 tmp2 = load_reg(s, rn);
9ee6e8bb 7952 if (op1 & 2)
9ef39277 7953 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7954 if (op1 & 1)
9ef39277 7955 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7956 else
9ef39277 7957 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7958 tcg_temp_free_i32(tmp2);
5e3f878a 7959 store_reg(s, rd, tmp);
9ee6e8bb 7960 break;
49e14940 7961 case 7:
d4a2dc67
PM
7962 {
7963 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
7964 switch (op1) {
7965 case 1:
7966 /* bkpt */
7967 ARCH(5);
7968 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
7969 syn_aa32_bkpt(imm16, false),
7970 default_exception_el(s));
37e6456e
PM
7971 break;
7972 case 2:
7973 /* Hypervisor call (v7) */
7974 ARCH(7);
7975 if (IS_USER(s)) {
7976 goto illegal_op;
7977 }
7978 gen_hvc(s, imm16);
7979 break;
7980 case 3:
7981 /* Secure monitor call (v6+) */
7982 ARCH(6K);
7983 if (IS_USER(s)) {
7984 goto illegal_op;
7985 }
7986 gen_smc(s);
7987 break;
7988 default:
49e14940
AL
7989 goto illegal_op;
7990 }
9ee6e8bb 7991 break;
d4a2dc67 7992 }
9ee6e8bb
PB
7993 case 0x8: /* signed multiply */
7994 case 0xa:
7995 case 0xc:
7996 case 0xe:
be5e7a76 7997 ARCH(5TE);
9ee6e8bb
PB
7998 rs = (insn >> 8) & 0xf;
7999 rn = (insn >> 12) & 0xf;
8000 rd = (insn >> 16) & 0xf;
8001 if (op1 == 1) {
8002 /* (32 * 16) >> 16 */
5e3f878a
PB
8003 tmp = load_reg(s, rm);
8004 tmp2 = load_reg(s, rs);
9ee6e8bb 8005 if (sh & 4)
5e3f878a 8006 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8007 else
5e3f878a 8008 gen_sxth(tmp2);
a7812ae4
PB
8009 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8010 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8011 tmp = tcg_temp_new_i32();
a7812ae4 8012 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8013 tcg_temp_free_i64(tmp64);
9ee6e8bb 8014 if ((sh & 2) == 0) {
5e3f878a 8015 tmp2 = load_reg(s, rn);
9ef39277 8016 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8017 tcg_temp_free_i32(tmp2);
9ee6e8bb 8018 }
5e3f878a 8019 store_reg(s, rd, tmp);
9ee6e8bb
PB
8020 } else {
8021 /* 16 * 16 */
5e3f878a
PB
8022 tmp = load_reg(s, rm);
8023 tmp2 = load_reg(s, rs);
8024 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8025 tcg_temp_free_i32(tmp2);
9ee6e8bb 8026 if (op1 == 2) {
a7812ae4
PB
8027 tmp64 = tcg_temp_new_i64();
8028 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8029 tcg_temp_free_i32(tmp);
a7812ae4
PB
8030 gen_addq(s, tmp64, rn, rd);
8031 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8032 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8033 } else {
8034 if (op1 == 0) {
5e3f878a 8035 tmp2 = load_reg(s, rn);
9ef39277 8036 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8037 tcg_temp_free_i32(tmp2);
9ee6e8bb 8038 }
5e3f878a 8039 store_reg(s, rd, tmp);
9ee6e8bb
PB
8040 }
8041 }
8042 break;
8043 default:
8044 goto illegal_op;
8045 }
8046 } else if (((insn & 0x0e000000) == 0 &&
8047 (insn & 0x00000090) != 0x90) ||
8048 ((insn & 0x0e000000) == (1 << 25))) {
8049 int set_cc, logic_cc, shiftop;
8050
8051 op1 = (insn >> 21) & 0xf;
8052 set_cc = (insn >> 20) & 1;
8053 logic_cc = table_logic_cc[op1] & set_cc;
8054
8055 /* data processing instruction */
8056 if (insn & (1 << 25)) {
8057 /* immediate operand */
8058 val = insn & 0xff;
8059 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8060 if (shift) {
9ee6e8bb 8061 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8062 }
7d1b0095 8063 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8064 tcg_gen_movi_i32(tmp2, val);
8065 if (logic_cc && shift) {
8066 gen_set_CF_bit31(tmp2);
8067 }
9ee6e8bb
PB
8068 } else {
8069 /* register */
8070 rm = (insn) & 0xf;
e9bb4aa9 8071 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8072 shiftop = (insn >> 5) & 3;
8073 if (!(insn & (1 << 4))) {
8074 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8075 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8076 } else {
8077 rs = (insn >> 8) & 0xf;
8984bd2e 8078 tmp = load_reg(s, rs);
e9bb4aa9 8079 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8080 }
8081 }
8082 if (op1 != 0x0f && op1 != 0x0d) {
8083 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8084 tmp = load_reg(s, rn);
8085 } else {
39d5492a 8086 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8087 }
8088 rd = (insn >> 12) & 0xf;
8089 switch(op1) {
8090 case 0x00:
e9bb4aa9
JR
8091 tcg_gen_and_i32(tmp, tmp, tmp2);
8092 if (logic_cc) {
8093 gen_logic_CC(tmp);
8094 }
7dcc1f89 8095 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8096 break;
8097 case 0x01:
e9bb4aa9
JR
8098 tcg_gen_xor_i32(tmp, tmp, tmp2);
8099 if (logic_cc) {
8100 gen_logic_CC(tmp);
8101 }
7dcc1f89 8102 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8103 break;
8104 case 0x02:
8105 if (set_cc && rd == 15) {
8106 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8107 if (IS_USER(s)) {
9ee6e8bb 8108 goto illegal_op;
e9bb4aa9 8109 }
72485ec4 8110 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8111 gen_exception_return(s, tmp);
9ee6e8bb 8112 } else {
e9bb4aa9 8113 if (set_cc) {
72485ec4 8114 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8115 } else {
8116 tcg_gen_sub_i32(tmp, tmp, tmp2);
8117 }
7dcc1f89 8118 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8119 }
8120 break;
8121 case 0x03:
e9bb4aa9 8122 if (set_cc) {
72485ec4 8123 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8124 } else {
8125 tcg_gen_sub_i32(tmp, tmp2, tmp);
8126 }
7dcc1f89 8127 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8128 break;
8129 case 0x04:
e9bb4aa9 8130 if (set_cc) {
72485ec4 8131 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8132 } else {
8133 tcg_gen_add_i32(tmp, tmp, tmp2);
8134 }
7dcc1f89 8135 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8136 break;
8137 case 0x05:
e9bb4aa9 8138 if (set_cc) {
49b4c31e 8139 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8140 } else {
8141 gen_add_carry(tmp, tmp, tmp2);
8142 }
7dcc1f89 8143 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8144 break;
8145 case 0x06:
e9bb4aa9 8146 if (set_cc) {
2de68a49 8147 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8148 } else {
8149 gen_sub_carry(tmp, tmp, tmp2);
8150 }
7dcc1f89 8151 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8152 break;
8153 case 0x07:
e9bb4aa9 8154 if (set_cc) {
2de68a49 8155 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8156 } else {
8157 gen_sub_carry(tmp, tmp2, tmp);
8158 }
7dcc1f89 8159 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8160 break;
8161 case 0x08:
8162 if (set_cc) {
e9bb4aa9
JR
8163 tcg_gen_and_i32(tmp, tmp, tmp2);
8164 gen_logic_CC(tmp);
9ee6e8bb 8165 }
7d1b0095 8166 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8167 break;
8168 case 0x09:
8169 if (set_cc) {
e9bb4aa9
JR
8170 tcg_gen_xor_i32(tmp, tmp, tmp2);
8171 gen_logic_CC(tmp);
9ee6e8bb 8172 }
7d1b0095 8173 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8174 break;
8175 case 0x0a:
8176 if (set_cc) {
72485ec4 8177 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8178 }
7d1b0095 8179 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8180 break;
8181 case 0x0b:
8182 if (set_cc) {
72485ec4 8183 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8184 }
7d1b0095 8185 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8186 break;
8187 case 0x0c:
e9bb4aa9
JR
8188 tcg_gen_or_i32(tmp, tmp, tmp2);
8189 if (logic_cc) {
8190 gen_logic_CC(tmp);
8191 }
7dcc1f89 8192 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8193 break;
8194 case 0x0d:
8195 if (logic_cc && rd == 15) {
8196 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8197 if (IS_USER(s)) {
9ee6e8bb 8198 goto illegal_op;
e9bb4aa9
JR
8199 }
8200 gen_exception_return(s, tmp2);
9ee6e8bb 8201 } else {
e9bb4aa9
JR
8202 if (logic_cc) {
8203 gen_logic_CC(tmp2);
8204 }
7dcc1f89 8205 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8206 }
8207 break;
8208 case 0x0e:
f669df27 8209 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8210 if (logic_cc) {
8211 gen_logic_CC(tmp);
8212 }
7dcc1f89 8213 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8214 break;
8215 default:
8216 case 0x0f:
e9bb4aa9
JR
8217 tcg_gen_not_i32(tmp2, tmp2);
8218 if (logic_cc) {
8219 gen_logic_CC(tmp2);
8220 }
7dcc1f89 8221 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8222 break;
8223 }
e9bb4aa9 8224 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8225 tcg_temp_free_i32(tmp2);
e9bb4aa9 8226 }
9ee6e8bb
PB
8227 } else {
8228 /* other instructions */
8229 op1 = (insn >> 24) & 0xf;
8230 switch(op1) {
8231 case 0x0:
8232 case 0x1:
8233 /* multiplies, extra load/stores */
8234 sh = (insn >> 5) & 3;
8235 if (sh == 0) {
8236 if (op1 == 0x0) {
8237 rd = (insn >> 16) & 0xf;
8238 rn = (insn >> 12) & 0xf;
8239 rs = (insn >> 8) & 0xf;
8240 rm = (insn) & 0xf;
8241 op1 = (insn >> 20) & 0xf;
8242 switch (op1) {
8243 case 0: case 1: case 2: case 3: case 6:
8244 /* 32 bit mul */
5e3f878a
PB
8245 tmp = load_reg(s, rs);
8246 tmp2 = load_reg(s, rm);
8247 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8248 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8249 if (insn & (1 << 22)) {
8250 /* Subtract (mls) */
8251 ARCH(6T2);
5e3f878a
PB
8252 tmp2 = load_reg(s, rn);
8253 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8254 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8255 } else if (insn & (1 << 21)) {
8256 /* Add */
5e3f878a
PB
8257 tmp2 = load_reg(s, rn);
8258 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8259 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8260 }
8261 if (insn & (1 << 20))
5e3f878a
PB
8262 gen_logic_CC(tmp);
8263 store_reg(s, rd, tmp);
9ee6e8bb 8264 break;
8aac08b1
AJ
8265 case 4:
8266 /* 64 bit mul double accumulate (UMAAL) */
8267 ARCH(6);
8268 tmp = load_reg(s, rs);
8269 tmp2 = load_reg(s, rm);
8270 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8271 gen_addq_lo(s, tmp64, rn);
8272 gen_addq_lo(s, tmp64, rd);
8273 gen_storeq_reg(s, rn, rd, tmp64);
8274 tcg_temp_free_i64(tmp64);
8275 break;
8276 case 8: case 9: case 10: case 11:
8277 case 12: case 13: case 14: case 15:
8278 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8279 tmp = load_reg(s, rs);
8280 tmp2 = load_reg(s, rm);
8aac08b1 8281 if (insn & (1 << 22)) {
c9f10124 8282 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8283 } else {
c9f10124 8284 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8285 }
8286 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8287 TCGv_i32 al = load_reg(s, rn);
8288 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8289 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8290 tcg_temp_free_i32(al);
8291 tcg_temp_free_i32(ah);
9ee6e8bb 8292 }
8aac08b1 8293 if (insn & (1 << 20)) {
c9f10124 8294 gen_logicq_cc(tmp, tmp2);
8aac08b1 8295 }
c9f10124
RH
8296 store_reg(s, rn, tmp);
8297 store_reg(s, rd, tmp2);
9ee6e8bb 8298 break;
8aac08b1
AJ
8299 default:
8300 goto illegal_op;
9ee6e8bb
PB
8301 }
8302 } else {
8303 rn = (insn >> 16) & 0xf;
8304 rd = (insn >> 12) & 0xf;
8305 if (insn & (1 << 23)) {
8306 /* load/store exclusive */
2359bf80 8307 int op2 = (insn >> 8) & 3;
86753403 8308 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8309
8310 switch (op2) {
8311 case 0: /* lda/stl */
8312 if (op1 == 1) {
8313 goto illegal_op;
8314 }
8315 ARCH(8);
8316 break;
8317 case 1: /* reserved */
8318 goto illegal_op;
8319 case 2: /* ldaex/stlex */
8320 ARCH(8);
8321 break;
8322 case 3: /* ldrex/strex */
8323 if (op1) {
8324 ARCH(6K);
8325 } else {
8326 ARCH(6);
8327 }
8328 break;
8329 }
8330
3174f8e9 8331 addr = tcg_temp_local_new_i32();
98a46317 8332 load_reg_var(s, addr, rn);
2359bf80
MR
8333
8334 /* Since the emulation does not have barriers,
8335 the acquire/release semantics need no special
8336 handling */
8337 if (op2 == 0) {
8338 if (insn & (1 << 20)) {
8339 tmp = tcg_temp_new_i32();
8340 switch (op1) {
8341 case 0: /* lda */
6ce2faf4 8342 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8343 break;
8344 case 2: /* ldab */
6ce2faf4 8345 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8346 break;
8347 case 3: /* ldah */
6ce2faf4 8348 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8349 break;
8350 default:
8351 abort();
8352 }
8353 store_reg(s, rd, tmp);
8354 } else {
8355 rm = insn & 0xf;
8356 tmp = load_reg(s, rm);
8357 switch (op1) {
8358 case 0: /* stl */
6ce2faf4 8359 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8360 break;
8361 case 2: /* stlb */
6ce2faf4 8362 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8363 break;
8364 case 3: /* stlh */
6ce2faf4 8365 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8366 break;
8367 default:
8368 abort();
8369 }
8370 tcg_temp_free_i32(tmp);
8371 }
8372 } else if (insn & (1 << 20)) {
86753403
PB
8373 switch (op1) {
8374 case 0: /* ldrex */
426f5abc 8375 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8376 break;
8377 case 1: /* ldrexd */
426f5abc 8378 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8379 break;
8380 case 2: /* ldrexb */
426f5abc 8381 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8382 break;
8383 case 3: /* ldrexh */
426f5abc 8384 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8385 break;
8386 default:
8387 abort();
8388 }
9ee6e8bb
PB
8389 } else {
8390 rm = insn & 0xf;
86753403
PB
8391 switch (op1) {
8392 case 0: /* strex */
426f5abc 8393 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8394 break;
8395 case 1: /* strexd */
502e64fe 8396 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8397 break;
8398 case 2: /* strexb */
426f5abc 8399 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8400 break;
8401 case 3: /* strexh */
426f5abc 8402 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8403 break;
8404 default:
8405 abort();
8406 }
9ee6e8bb 8407 }
39d5492a 8408 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8409 } else {
8410 /* SWP instruction */
8411 rm = (insn) & 0xf;
8412
8984bd2e
PB
8413 /* ??? This is not really atomic. However we know
8414 we never have multiple CPUs running in parallel,
8415 so it is good enough. */
8416 addr = load_reg(s, rn);
8417 tmp = load_reg(s, rm);
5a839c0d 8418 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8419 if (insn & (1 << 22)) {
6ce2faf4
EI
8420 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8421 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8422 } else {
6ce2faf4
EI
8423 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8424 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8425 }
5a839c0d 8426 tcg_temp_free_i32(tmp);
7d1b0095 8427 tcg_temp_free_i32(addr);
8984bd2e 8428 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8429 }
8430 }
8431 } else {
8432 int address_offset;
3960c336
PM
8433 bool load = insn & (1 << 20);
8434 bool doubleword = false;
9ee6e8bb
PB
8435 /* Misc load/store */
8436 rn = (insn >> 16) & 0xf;
8437 rd = (insn >> 12) & 0xf;
3960c336
PM
8438
8439 if (!load && (sh & 2)) {
8440 /* doubleword */
8441 ARCH(5TE);
8442 if (rd & 1) {
8443 /* UNPREDICTABLE; we choose to UNDEF */
8444 goto illegal_op;
8445 }
8446 load = (sh & 1) == 0;
8447 doubleword = true;
8448 }
8449
b0109805 8450 addr = load_reg(s, rn);
9ee6e8bb 8451 if (insn & (1 << 24))
b0109805 8452 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8453 address_offset = 0;
3960c336
PM
8454
8455 if (doubleword) {
8456 if (!load) {
9ee6e8bb 8457 /* store */
b0109805 8458 tmp = load_reg(s, rd);
6ce2faf4 8459 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8460 tcg_temp_free_i32(tmp);
b0109805
PB
8461 tcg_gen_addi_i32(addr, addr, 4);
8462 tmp = load_reg(s, rd + 1);
6ce2faf4 8463 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8464 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8465 } else {
8466 /* load */
5a839c0d 8467 tmp = tcg_temp_new_i32();
6ce2faf4 8468 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8469 store_reg(s, rd, tmp);
8470 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8471 tmp = tcg_temp_new_i32();
6ce2faf4 8472 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 8473 rd++;
9ee6e8bb
PB
8474 }
8475 address_offset = -4;
3960c336
PM
8476 } else if (load) {
8477 /* load */
8478 tmp = tcg_temp_new_i32();
8479 switch (sh) {
8480 case 1:
8481 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8482 break;
8483 case 2:
8484 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8485 break;
8486 default:
8487 case 3:
8488 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8489 break;
8490 }
9ee6e8bb
PB
8491 } else {
8492 /* store */
b0109805 8493 tmp = load_reg(s, rd);
6ce2faf4 8494 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8495 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8496 }
8497 /* Perform base writeback before the loaded value to
8498 ensure correct behavior with overlapping index registers.
8499 ldrd with base writeback is is undefined if the
8500 destination and index registers overlap. */
8501 if (!(insn & (1 << 24))) {
b0109805
PB
8502 gen_add_datah_offset(s, insn, address_offset, addr);
8503 store_reg(s, rn, addr);
9ee6e8bb
PB
8504 } else if (insn & (1 << 21)) {
8505 if (address_offset)
b0109805
PB
8506 tcg_gen_addi_i32(addr, addr, address_offset);
8507 store_reg(s, rn, addr);
8508 } else {
7d1b0095 8509 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8510 }
8511 if (load) {
8512 /* Complete the load. */
b0109805 8513 store_reg(s, rd, tmp);
9ee6e8bb
PB
8514 }
8515 }
8516 break;
8517 case 0x4:
8518 case 0x5:
8519 goto do_ldst;
8520 case 0x6:
8521 case 0x7:
8522 if (insn & (1 << 4)) {
8523 ARCH(6);
8524 /* Armv6 Media instructions. */
8525 rm = insn & 0xf;
8526 rn = (insn >> 16) & 0xf;
2c0262af 8527 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8528 rs = (insn >> 8) & 0xf;
8529 switch ((insn >> 23) & 3) {
8530 case 0: /* Parallel add/subtract. */
8531 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8532 tmp = load_reg(s, rn);
8533 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8534 sh = (insn >> 5) & 7;
8535 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8536 goto illegal_op;
6ddbc6e4 8537 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8538 tcg_temp_free_i32(tmp2);
6ddbc6e4 8539 store_reg(s, rd, tmp);
9ee6e8bb
PB
8540 break;
8541 case 1:
8542 if ((insn & 0x00700020) == 0) {
6c95676b 8543 /* Halfword pack. */
3670669c
PB
8544 tmp = load_reg(s, rn);
8545 tmp2 = load_reg(s, rm);
9ee6e8bb 8546 shift = (insn >> 7) & 0x1f;
3670669c
PB
8547 if (insn & (1 << 6)) {
8548 /* pkhtb */
22478e79
AZ
8549 if (shift == 0)
8550 shift = 31;
8551 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8552 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8553 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8554 } else {
8555 /* pkhbt */
22478e79
AZ
8556 if (shift)
8557 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8558 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8559 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8560 }
8561 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8562 tcg_temp_free_i32(tmp2);
3670669c 8563 store_reg(s, rd, tmp);
9ee6e8bb
PB
8564 } else if ((insn & 0x00200020) == 0x00200000) {
8565 /* [us]sat */
6ddbc6e4 8566 tmp = load_reg(s, rm);
9ee6e8bb
PB
8567 shift = (insn >> 7) & 0x1f;
8568 if (insn & (1 << 6)) {
8569 if (shift == 0)
8570 shift = 31;
6ddbc6e4 8571 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8572 } else {
6ddbc6e4 8573 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8574 }
8575 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8576 tmp2 = tcg_const_i32(sh);
8577 if (insn & (1 << 22))
9ef39277 8578 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8579 else
9ef39277 8580 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8581 tcg_temp_free_i32(tmp2);
6ddbc6e4 8582 store_reg(s, rd, tmp);
9ee6e8bb
PB
8583 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8584 /* [us]sat16 */
6ddbc6e4 8585 tmp = load_reg(s, rm);
9ee6e8bb 8586 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8587 tmp2 = tcg_const_i32(sh);
8588 if (insn & (1 << 22))
9ef39277 8589 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8590 else
9ef39277 8591 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8592 tcg_temp_free_i32(tmp2);
6ddbc6e4 8593 store_reg(s, rd, tmp);
9ee6e8bb
PB
8594 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8595 /* Select bytes. */
6ddbc6e4
PB
8596 tmp = load_reg(s, rn);
8597 tmp2 = load_reg(s, rm);
7d1b0095 8598 tmp3 = tcg_temp_new_i32();
0ecb72a5 8599 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8600 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8601 tcg_temp_free_i32(tmp3);
8602 tcg_temp_free_i32(tmp2);
6ddbc6e4 8603 store_reg(s, rd, tmp);
9ee6e8bb 8604 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8605 tmp = load_reg(s, rm);
9ee6e8bb 8606 shift = (insn >> 10) & 3;
1301f322 8607 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8608 rotate, a shift is sufficient. */
8609 if (shift != 0)
f669df27 8610 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8611 op1 = (insn >> 20) & 7;
8612 switch (op1) {
5e3f878a
PB
8613 case 0: gen_sxtb16(tmp); break;
8614 case 2: gen_sxtb(tmp); break;
8615 case 3: gen_sxth(tmp); break;
8616 case 4: gen_uxtb16(tmp); break;
8617 case 6: gen_uxtb(tmp); break;
8618 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8619 default: goto illegal_op;
8620 }
8621 if (rn != 15) {
5e3f878a 8622 tmp2 = load_reg(s, rn);
9ee6e8bb 8623 if ((op1 & 3) == 0) {
5e3f878a 8624 gen_add16(tmp, tmp2);
9ee6e8bb 8625 } else {
5e3f878a 8626 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8627 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8628 }
8629 }
6c95676b 8630 store_reg(s, rd, tmp);
9ee6e8bb
PB
8631 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8632 /* rev */
b0109805 8633 tmp = load_reg(s, rm);
9ee6e8bb
PB
8634 if (insn & (1 << 22)) {
8635 if (insn & (1 << 7)) {
b0109805 8636 gen_revsh(tmp);
9ee6e8bb
PB
8637 } else {
8638 ARCH(6T2);
b0109805 8639 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8640 }
8641 } else {
8642 if (insn & (1 << 7))
b0109805 8643 gen_rev16(tmp);
9ee6e8bb 8644 else
66896cb8 8645 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8646 }
b0109805 8647 store_reg(s, rd, tmp);
9ee6e8bb
PB
8648 } else {
8649 goto illegal_op;
8650 }
8651 break;
8652 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8653 switch ((insn >> 20) & 0x7) {
8654 case 5:
8655 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8656 /* op2 not 00x or 11x : UNDEF */
8657 goto illegal_op;
8658 }
838fa72d
AJ
8659 /* Signed multiply most significant [accumulate].
8660 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8661 tmp = load_reg(s, rm);
8662 tmp2 = load_reg(s, rs);
a7812ae4 8663 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8664
955a7dd5 8665 if (rd != 15) {
838fa72d 8666 tmp = load_reg(s, rd);
9ee6e8bb 8667 if (insn & (1 << 6)) {
838fa72d 8668 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8669 } else {
838fa72d 8670 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8671 }
8672 }
838fa72d
AJ
8673 if (insn & (1 << 5)) {
8674 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8675 }
8676 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8677 tmp = tcg_temp_new_i32();
838fa72d
AJ
8678 tcg_gen_trunc_i64_i32(tmp, tmp64);
8679 tcg_temp_free_i64(tmp64);
955a7dd5 8680 store_reg(s, rn, tmp);
41e9564d
PM
8681 break;
8682 case 0:
8683 case 4:
8684 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8685 if (insn & (1 << 7)) {
8686 goto illegal_op;
8687 }
8688 tmp = load_reg(s, rm);
8689 tmp2 = load_reg(s, rs);
9ee6e8bb 8690 if (insn & (1 << 5))
5e3f878a
PB
8691 gen_swap_half(tmp2);
8692 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8693 if (insn & (1 << 22)) {
5e3f878a 8694 /* smlald, smlsld */
33bbd75a
PC
8695 TCGv_i64 tmp64_2;
8696
a7812ae4 8697 tmp64 = tcg_temp_new_i64();
33bbd75a 8698 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8699 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8700 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8701 tcg_temp_free_i32(tmp);
33bbd75a
PC
8702 tcg_temp_free_i32(tmp2);
8703 if (insn & (1 << 6)) {
8704 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8705 } else {
8706 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8707 }
8708 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8709 gen_addq(s, tmp64, rd, rn);
8710 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8711 tcg_temp_free_i64(tmp64);
9ee6e8bb 8712 } else {
5e3f878a 8713 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8714 if (insn & (1 << 6)) {
8715 /* This subtraction cannot overflow. */
8716 tcg_gen_sub_i32(tmp, tmp, tmp2);
8717 } else {
8718 /* This addition cannot overflow 32 bits;
8719 * however it may overflow considered as a
8720 * signed operation, in which case we must set
8721 * the Q flag.
8722 */
8723 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8724 }
8725 tcg_temp_free_i32(tmp2);
22478e79 8726 if (rd != 15)
9ee6e8bb 8727 {
22478e79 8728 tmp2 = load_reg(s, rd);
9ef39277 8729 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8730 tcg_temp_free_i32(tmp2);
9ee6e8bb 8731 }
22478e79 8732 store_reg(s, rn, tmp);
9ee6e8bb 8733 }
41e9564d 8734 break;
b8b8ea05
PM
8735 case 1:
8736 case 3:
8737 /* SDIV, UDIV */
d614a513 8738 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
8739 goto illegal_op;
8740 }
8741 if (((insn >> 5) & 7) || (rd != 15)) {
8742 goto illegal_op;
8743 }
8744 tmp = load_reg(s, rm);
8745 tmp2 = load_reg(s, rs);
8746 if (insn & (1 << 21)) {
8747 gen_helper_udiv(tmp, tmp, tmp2);
8748 } else {
8749 gen_helper_sdiv(tmp, tmp, tmp2);
8750 }
8751 tcg_temp_free_i32(tmp2);
8752 store_reg(s, rn, tmp);
8753 break;
41e9564d
PM
8754 default:
8755 goto illegal_op;
9ee6e8bb
PB
8756 }
8757 break;
8758 case 3:
8759 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8760 switch (op1) {
8761 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8762 ARCH(6);
8763 tmp = load_reg(s, rm);
8764 tmp2 = load_reg(s, rs);
8765 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8766 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8767 if (rd != 15) {
8768 tmp2 = load_reg(s, rd);
6ddbc6e4 8769 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8770 tcg_temp_free_i32(tmp2);
9ee6e8bb 8771 }
ded9d295 8772 store_reg(s, rn, tmp);
9ee6e8bb
PB
8773 break;
8774 case 0x20: case 0x24: case 0x28: case 0x2c:
8775 /* Bitfield insert/clear. */
8776 ARCH(6T2);
8777 shift = (insn >> 7) & 0x1f;
8778 i = (insn >> 16) & 0x1f;
45140a57
KB
8779 if (i < shift) {
8780 /* UNPREDICTABLE; we choose to UNDEF */
8781 goto illegal_op;
8782 }
9ee6e8bb
PB
8783 i = i + 1 - shift;
8784 if (rm == 15) {
7d1b0095 8785 tmp = tcg_temp_new_i32();
5e3f878a 8786 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8787 } else {
5e3f878a 8788 tmp = load_reg(s, rm);
9ee6e8bb
PB
8789 }
8790 if (i != 32) {
5e3f878a 8791 tmp2 = load_reg(s, rd);
d593c48e 8792 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8793 tcg_temp_free_i32(tmp2);
9ee6e8bb 8794 }
5e3f878a 8795 store_reg(s, rd, tmp);
9ee6e8bb
PB
8796 break;
8797 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8798 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8799 ARCH(6T2);
5e3f878a 8800 tmp = load_reg(s, rm);
9ee6e8bb
PB
8801 shift = (insn >> 7) & 0x1f;
8802 i = ((insn >> 16) & 0x1f) + 1;
8803 if (shift + i > 32)
8804 goto illegal_op;
8805 if (i < 32) {
8806 if (op1 & 0x20) {
5e3f878a 8807 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8808 } else {
5e3f878a 8809 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8810 }
8811 }
5e3f878a 8812 store_reg(s, rd, tmp);
9ee6e8bb
PB
8813 break;
8814 default:
8815 goto illegal_op;
8816 }
8817 break;
8818 }
8819 break;
8820 }
8821 do_ldst:
8822 /* Check for undefined extension instructions
8823 * per the ARM Bible IE:
8824 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8825 */
8826 sh = (0xf << 20) | (0xf << 4);
8827 if (op1 == 0x7 && ((insn & sh) == sh))
8828 {
8829 goto illegal_op;
8830 }
8831 /* load/store byte/word */
8832 rn = (insn >> 16) & 0xf;
8833 rd = (insn >> 12) & 0xf;
b0109805 8834 tmp2 = load_reg(s, rn);
a99caa48
PM
8835 if ((insn & 0x01200000) == 0x00200000) {
8836 /* ldrt/strt */
579d21cc 8837 i = get_a32_user_mem_index(s);
a99caa48
PM
8838 } else {
8839 i = get_mem_index(s);
8840 }
9ee6e8bb 8841 if (insn & (1 << 24))
b0109805 8842 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8843 if (insn & (1 << 20)) {
8844 /* load */
5a839c0d 8845 tmp = tcg_temp_new_i32();
9ee6e8bb 8846 if (insn & (1 << 22)) {
08307563 8847 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8848 } else {
08307563 8849 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8850 }
9ee6e8bb
PB
8851 } else {
8852 /* store */
b0109805 8853 tmp = load_reg(s, rd);
5a839c0d 8854 if (insn & (1 << 22)) {
08307563 8855 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8856 } else {
08307563 8857 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8858 }
8859 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8860 }
8861 if (!(insn & (1 << 24))) {
b0109805
PB
8862 gen_add_data_offset(s, insn, tmp2);
8863 store_reg(s, rn, tmp2);
8864 } else if (insn & (1 << 21)) {
8865 store_reg(s, rn, tmp2);
8866 } else {
7d1b0095 8867 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8868 }
8869 if (insn & (1 << 20)) {
8870 /* Complete the load. */
7dcc1f89 8871 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
8872 }
8873 break;
8874 case 0x08:
8875 case 0x09:
8876 {
da3e53dd
PM
8877 int j, n, loaded_base;
8878 bool exc_return = false;
8879 bool is_load = extract32(insn, 20, 1);
8880 bool user = false;
39d5492a 8881 TCGv_i32 loaded_var;
9ee6e8bb
PB
8882 /* load/store multiple words */
8883 /* XXX: store correct base if write back */
9ee6e8bb 8884 if (insn & (1 << 22)) {
da3e53dd 8885 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
8886 if (IS_USER(s))
8887 goto illegal_op; /* only usable in supervisor mode */
8888
da3e53dd
PM
8889 if (is_load && extract32(insn, 15, 1)) {
8890 exc_return = true;
8891 } else {
8892 user = true;
8893 }
9ee6e8bb
PB
8894 }
8895 rn = (insn >> 16) & 0xf;
b0109805 8896 addr = load_reg(s, rn);
9ee6e8bb
PB
8897
8898 /* compute total size */
8899 loaded_base = 0;
39d5492a 8900 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8901 n = 0;
8902 for(i=0;i<16;i++) {
8903 if (insn & (1 << i))
8904 n++;
8905 }
8906 /* XXX: test invalid n == 0 case ? */
8907 if (insn & (1 << 23)) {
8908 if (insn & (1 << 24)) {
8909 /* pre increment */
b0109805 8910 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8911 } else {
8912 /* post increment */
8913 }
8914 } else {
8915 if (insn & (1 << 24)) {
8916 /* pre decrement */
b0109805 8917 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8918 } else {
8919 /* post decrement */
8920 if (n != 1)
b0109805 8921 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8922 }
8923 }
8924 j = 0;
8925 for(i=0;i<16;i++) {
8926 if (insn & (1 << i)) {
da3e53dd 8927 if (is_load) {
9ee6e8bb 8928 /* load */
5a839c0d 8929 tmp = tcg_temp_new_i32();
6ce2faf4 8930 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8931 if (user) {
b75263d6 8932 tmp2 = tcg_const_i32(i);
1ce94f81 8933 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8934 tcg_temp_free_i32(tmp2);
7d1b0095 8935 tcg_temp_free_i32(tmp);
9ee6e8bb 8936 } else if (i == rn) {
b0109805 8937 loaded_var = tmp;
9ee6e8bb
PB
8938 loaded_base = 1;
8939 } else {
7dcc1f89 8940 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
8941 }
8942 } else {
8943 /* store */
8944 if (i == 15) {
8945 /* special case: r15 = PC + 8 */
8946 val = (long)s->pc + 4;
7d1b0095 8947 tmp = tcg_temp_new_i32();
b0109805 8948 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8949 } else if (user) {
7d1b0095 8950 tmp = tcg_temp_new_i32();
b75263d6 8951 tmp2 = tcg_const_i32(i);
9ef39277 8952 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8953 tcg_temp_free_i32(tmp2);
9ee6e8bb 8954 } else {
b0109805 8955 tmp = load_reg(s, i);
9ee6e8bb 8956 }
6ce2faf4 8957 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8958 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8959 }
8960 j++;
8961 /* no need to add after the last transfer */
8962 if (j != n)
b0109805 8963 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8964 }
8965 }
8966 if (insn & (1 << 21)) {
8967 /* write back */
8968 if (insn & (1 << 23)) {
8969 if (insn & (1 << 24)) {
8970 /* pre increment */
8971 } else {
8972 /* post increment */
b0109805 8973 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8974 }
8975 } else {
8976 if (insn & (1 << 24)) {
8977 /* pre decrement */
8978 if (n != 1)
b0109805 8979 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8980 } else {
8981 /* post decrement */
b0109805 8982 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8983 }
8984 }
b0109805
PB
8985 store_reg(s, rn, addr);
8986 } else {
7d1b0095 8987 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8988 }
8989 if (loaded_base) {
b0109805 8990 store_reg(s, rn, loaded_var);
9ee6e8bb 8991 }
da3e53dd 8992 if (exc_return) {
9ee6e8bb 8993 /* Restore CPSR from SPSR. */
d9ba4830 8994 tmp = load_cpu_field(spsr);
4051e12c 8995 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 8996 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8997 s->is_jmp = DISAS_UPDATE;
8998 }
8999 }
9000 break;
9001 case 0xa:
9002 case 0xb:
9003 {
9004 int32_t offset;
9005
9006 /* branch (and link) */
9007 val = (int32_t)s->pc;
9008 if (insn & (1 << 24)) {
7d1b0095 9009 tmp = tcg_temp_new_i32();
5e3f878a
PB
9010 tcg_gen_movi_i32(tmp, val);
9011 store_reg(s, 14, tmp);
9ee6e8bb 9012 }
534df156
PM
9013 offset = sextract32(insn << 2, 0, 26);
9014 val += offset + 4;
9ee6e8bb
PB
9015 gen_jmp(s, val);
9016 }
9017 break;
9018 case 0xc:
9019 case 0xd:
9020 case 0xe:
6a57f3eb
WN
9021 if (((insn >> 8) & 0xe) == 10) {
9022 /* VFP. */
7dcc1f89 9023 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9024 goto illegal_op;
9025 }
7dcc1f89 9026 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9027 /* Coprocessor. */
9ee6e8bb 9028 goto illegal_op;
6a57f3eb 9029 }
9ee6e8bb
PB
9030 break;
9031 case 0xf:
9032 /* swi */
eaed129d 9033 gen_set_pc_im(s, s->pc);
d4a2dc67 9034 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9035 s->is_jmp = DISAS_SWI;
9036 break;
9037 default:
9038 illegal_op:
73710361
GB
9039 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9040 default_exception_el(s));
9ee6e8bb
PB
9041 break;
9042 }
9043 }
9044}
9045
/* Return true (nonzero) if OP is one of the Thumb-2 logical
 * data-processing opcodes.  Opcodes 0..7 in this encoding space are
 * the logical group (AND/BIC/ORR/ORN/EOR/...); arithmetic ops start
 * at 8.  Note: no lower-bound check is done on OP; callers pass a
 * decoded 4-bit field.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9052
9053/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9054 then set condition code flags based on the result of the operation.
9055 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9056 to the high bit of T1.
9057 Returns zero if the opcode is valid. */
9058
9059static int
39d5492a
PM
9060gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9061 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9062{
9063 int logic_cc;
9064
9065 logic_cc = 0;
9066 switch (op) {
9067 case 0: /* and */
396e467c 9068 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9069 logic_cc = conds;
9070 break;
9071 case 1: /* bic */
f669df27 9072 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9073 logic_cc = conds;
9074 break;
9075 case 2: /* orr */
396e467c 9076 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9077 logic_cc = conds;
9078 break;
9079 case 3: /* orn */
29501f1b 9080 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9081 logic_cc = conds;
9082 break;
9083 case 4: /* eor */
396e467c 9084 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9085 logic_cc = conds;
9086 break;
9087 case 8: /* add */
9088 if (conds)
72485ec4 9089 gen_add_CC(t0, t0, t1);
9ee6e8bb 9090 else
396e467c 9091 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9092 break;
9093 case 10: /* adc */
9094 if (conds)
49b4c31e 9095 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9096 else
396e467c 9097 gen_adc(t0, t1);
9ee6e8bb
PB
9098 break;
9099 case 11: /* sbc */
2de68a49
RH
9100 if (conds) {
9101 gen_sbc_CC(t0, t0, t1);
9102 } else {
396e467c 9103 gen_sub_carry(t0, t0, t1);
2de68a49 9104 }
9ee6e8bb
PB
9105 break;
9106 case 13: /* sub */
9107 if (conds)
72485ec4 9108 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9109 else
396e467c 9110 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9111 break;
9112 case 14: /* rsb */
9113 if (conds)
72485ec4 9114 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9115 else
396e467c 9116 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9117 break;
9118 default: /* 5, 6, 7, 9, 12, 15. */
9119 return 1;
9120 }
9121 if (logic_cc) {
396e467c 9122 gen_logic_CC(t0);
9ee6e8bb 9123 if (shifter_out)
396e467c 9124 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9125 }
9126 return 0;
9127}
9128
9129/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9130 is not legal. */
0ecb72a5 9131static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9132{
b0109805 9133 uint32_t insn, imm, shift, offset;
9ee6e8bb 9134 uint32_t rd, rn, rm, rs;
39d5492a
PM
9135 TCGv_i32 tmp;
9136 TCGv_i32 tmp2;
9137 TCGv_i32 tmp3;
9138 TCGv_i32 addr;
a7812ae4 9139 TCGv_i64 tmp64;
9ee6e8bb
PB
9140 int op;
9141 int shiftop;
9142 int conds;
9143 int logic_cc;
9144
d614a513
PM
9145 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9146 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9147 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9148 16-bit instructions to get correct prefetch abort behavior. */
9149 insn = insn_hw1;
9150 if ((insn & (1 << 12)) == 0) {
be5e7a76 9151 ARCH(5);
9ee6e8bb
PB
9152 /* Second half of blx. */
9153 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9154 tmp = load_reg(s, 14);
9155 tcg_gen_addi_i32(tmp, tmp, offset);
9156 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9157
7d1b0095 9158 tmp2 = tcg_temp_new_i32();
b0109805 9159 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9160 store_reg(s, 14, tmp2);
9161 gen_bx(s, tmp);
9ee6e8bb
PB
9162 return 0;
9163 }
9164 if (insn & (1 << 11)) {
9165 /* Second half of bl. */
9166 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9167 tmp = load_reg(s, 14);
6a0d8a1d 9168 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9169
7d1b0095 9170 tmp2 = tcg_temp_new_i32();
b0109805 9171 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9172 store_reg(s, 14, tmp2);
9173 gen_bx(s, tmp);
9ee6e8bb
PB
9174 return 0;
9175 }
9176 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9177 /* Instruction spans a page boundary. Implement it as two
9178 16-bit instructions in case the second half causes an
9179 prefetch abort. */
9180 offset = ((int32_t)insn << 21) >> 9;
396e467c 9181 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9182 return 0;
9183 }
9184 /* Fall through to 32-bit decode. */
9185 }
9186
d31dd73e 9187 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9188 s->pc += 2;
9189 insn |= (uint32_t)insn_hw1 << 16;
9190
9191 if ((insn & 0xf800e800) != 0xf000e800) {
9192 ARCH(6T2);
9193 }
9194
9195 rn = (insn >> 16) & 0xf;
9196 rs = (insn >> 12) & 0xf;
9197 rd = (insn >> 8) & 0xf;
9198 rm = insn & 0xf;
9199 switch ((insn >> 25) & 0xf) {
9200 case 0: case 1: case 2: case 3:
9201 /* 16-bit instructions. Should never happen. */
9202 abort();
9203 case 4:
9204 if (insn & (1 << 22)) {
9205 /* Other load/store, table branch. */
9206 if (insn & 0x01200000) {
9207 /* Load/store doubleword. */
9208 if (rn == 15) {
7d1b0095 9209 addr = tcg_temp_new_i32();
b0109805 9210 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9211 } else {
b0109805 9212 addr = load_reg(s, rn);
9ee6e8bb
PB
9213 }
9214 offset = (insn & 0xff) * 4;
9215 if ((insn & (1 << 23)) == 0)
9216 offset = -offset;
9217 if (insn & (1 << 24)) {
b0109805 9218 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9219 offset = 0;
9220 }
9221 if (insn & (1 << 20)) {
9222 /* ldrd */
e2592fad 9223 tmp = tcg_temp_new_i32();
6ce2faf4 9224 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9225 store_reg(s, rs, tmp);
9226 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9227 tmp = tcg_temp_new_i32();
6ce2faf4 9228 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9229 store_reg(s, rd, tmp);
9ee6e8bb
PB
9230 } else {
9231 /* strd */
b0109805 9232 tmp = load_reg(s, rs);
6ce2faf4 9233 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9234 tcg_temp_free_i32(tmp);
b0109805
PB
9235 tcg_gen_addi_i32(addr, addr, 4);
9236 tmp = load_reg(s, rd);
6ce2faf4 9237 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9238 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9239 }
9240 if (insn & (1 << 21)) {
9241 /* Base writeback. */
9242 if (rn == 15)
9243 goto illegal_op;
b0109805
PB
9244 tcg_gen_addi_i32(addr, addr, offset - 4);
9245 store_reg(s, rn, addr);
9246 } else {
7d1b0095 9247 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9248 }
9249 } else if ((insn & (1 << 23)) == 0) {
9250 /* Load/store exclusive word. */
39d5492a 9251 addr = tcg_temp_local_new_i32();
98a46317 9252 load_reg_var(s, addr, rn);
426f5abc 9253 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9254 if (insn & (1 << 20)) {
426f5abc 9255 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9256 } else {
426f5abc 9257 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9258 }
39d5492a 9259 tcg_temp_free_i32(addr);
2359bf80 9260 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9261 /* Table Branch. */
9262 if (rn == 15) {
7d1b0095 9263 addr = tcg_temp_new_i32();
b0109805 9264 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9265 } else {
b0109805 9266 addr = load_reg(s, rn);
9ee6e8bb 9267 }
b26eefb6 9268 tmp = load_reg(s, rm);
b0109805 9269 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9270 if (insn & (1 << 4)) {
9271 /* tbh */
b0109805 9272 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9273 tcg_temp_free_i32(tmp);
e2592fad 9274 tmp = tcg_temp_new_i32();
6ce2faf4 9275 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9276 } else { /* tbb */
7d1b0095 9277 tcg_temp_free_i32(tmp);
e2592fad 9278 tmp = tcg_temp_new_i32();
6ce2faf4 9279 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9280 }
7d1b0095 9281 tcg_temp_free_i32(addr);
b0109805
PB
9282 tcg_gen_shli_i32(tmp, tmp, 1);
9283 tcg_gen_addi_i32(tmp, tmp, s->pc);
9284 store_reg(s, 15, tmp);
9ee6e8bb 9285 } else {
2359bf80 9286 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9287 op = (insn >> 4) & 0x3;
2359bf80
MR
9288 switch (op2) {
9289 case 0:
426f5abc 9290 goto illegal_op;
2359bf80
MR
9291 case 1:
9292 /* Load/store exclusive byte/halfword/doubleword */
9293 if (op == 2) {
9294 goto illegal_op;
9295 }
9296 ARCH(7);
9297 break;
9298 case 2:
9299 /* Load-acquire/store-release */
9300 if (op == 3) {
9301 goto illegal_op;
9302 }
9303 /* Fall through */
9304 case 3:
9305 /* Load-acquire/store-release exclusive */
9306 ARCH(8);
9307 break;
426f5abc 9308 }
39d5492a 9309 addr = tcg_temp_local_new_i32();
98a46317 9310 load_reg_var(s, addr, rn);
2359bf80
MR
9311 if (!(op2 & 1)) {
9312 if (insn & (1 << 20)) {
9313 tmp = tcg_temp_new_i32();
9314 switch (op) {
9315 case 0: /* ldab */
6ce2faf4 9316 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9317 break;
9318 case 1: /* ldah */
6ce2faf4 9319 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9320 break;
9321 case 2: /* lda */
6ce2faf4 9322 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9323 break;
9324 default:
9325 abort();
9326 }
9327 store_reg(s, rs, tmp);
9328 } else {
9329 tmp = load_reg(s, rs);
9330 switch (op) {
9331 case 0: /* stlb */
6ce2faf4 9332 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9333 break;
9334 case 1: /* stlh */
6ce2faf4 9335 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9336 break;
9337 case 2: /* stl */
6ce2faf4 9338 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9339 break;
9340 default:
9341 abort();
9342 }
9343 tcg_temp_free_i32(tmp);
9344 }
9345 } else if (insn & (1 << 20)) {
426f5abc 9346 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9347 } else {
426f5abc 9348 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9349 }
39d5492a 9350 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9351 }
9352 } else {
9353 /* Load/store multiple, RFE, SRS. */
9354 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9355 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9356 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9357 goto illegal_op;
00115976 9358 }
9ee6e8bb
PB
9359 if (insn & (1 << 20)) {
9360 /* rfe */
b0109805
PB
9361 addr = load_reg(s, rn);
9362 if ((insn & (1 << 24)) == 0)
9363 tcg_gen_addi_i32(addr, addr, -8);
9364 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9365 tmp = tcg_temp_new_i32();
6ce2faf4 9366 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9367 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9368 tmp2 = tcg_temp_new_i32();
6ce2faf4 9369 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9370 if (insn & (1 << 21)) {
9371 /* Base writeback. */
b0109805
PB
9372 if (insn & (1 << 24)) {
9373 tcg_gen_addi_i32(addr, addr, 4);
9374 } else {
9375 tcg_gen_addi_i32(addr, addr, -4);
9376 }
9377 store_reg(s, rn, addr);
9378 } else {
7d1b0095 9379 tcg_temp_free_i32(addr);
9ee6e8bb 9380 }
b0109805 9381 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9382 } else {
9383 /* srs */
81465888
PM
9384 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9385 insn & (1 << 21));
9ee6e8bb
PB
9386 }
9387 } else {
5856d44e 9388 int i, loaded_base = 0;
39d5492a 9389 TCGv_i32 loaded_var;
9ee6e8bb 9390 /* Load/store multiple. */
b0109805 9391 addr = load_reg(s, rn);
9ee6e8bb
PB
9392 offset = 0;
9393 for (i = 0; i < 16; i++) {
9394 if (insn & (1 << i))
9395 offset += 4;
9396 }
9397 if (insn & (1 << 24)) {
b0109805 9398 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9399 }
9400
39d5492a 9401 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9402 for (i = 0; i < 16; i++) {
9403 if ((insn & (1 << i)) == 0)
9404 continue;
9405 if (insn & (1 << 20)) {
9406 /* Load. */
e2592fad 9407 tmp = tcg_temp_new_i32();
6ce2faf4 9408 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9409 if (i == 15) {
b0109805 9410 gen_bx(s, tmp);
5856d44e
YO
9411 } else if (i == rn) {
9412 loaded_var = tmp;
9413 loaded_base = 1;
9ee6e8bb 9414 } else {
b0109805 9415 store_reg(s, i, tmp);
9ee6e8bb
PB
9416 }
9417 } else {
9418 /* Store. */
b0109805 9419 tmp = load_reg(s, i);
6ce2faf4 9420 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9421 tcg_temp_free_i32(tmp);
9ee6e8bb 9422 }
b0109805 9423 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9424 }
5856d44e
YO
9425 if (loaded_base) {
9426 store_reg(s, rn, loaded_var);
9427 }
9ee6e8bb
PB
9428 if (insn & (1 << 21)) {
9429 /* Base register writeback. */
9430 if (insn & (1 << 24)) {
b0109805 9431 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9432 }
9433 /* Fault if writeback register is in register list. */
9434 if (insn & (1 << rn))
9435 goto illegal_op;
b0109805
PB
9436 store_reg(s, rn, addr);
9437 } else {
7d1b0095 9438 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9439 }
9440 }
9441 }
9442 break;
2af9ab77
JB
9443 case 5:
9444
9ee6e8bb 9445 op = (insn >> 21) & 0xf;
2af9ab77
JB
9446 if (op == 6) {
9447 /* Halfword pack. */
9448 tmp = load_reg(s, rn);
9449 tmp2 = load_reg(s, rm);
9450 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9451 if (insn & (1 << 5)) {
9452 /* pkhtb */
9453 if (shift == 0)
9454 shift = 31;
9455 tcg_gen_sari_i32(tmp2, tmp2, shift);
9456 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9457 tcg_gen_ext16u_i32(tmp2, tmp2);
9458 } else {
9459 /* pkhbt */
9460 if (shift)
9461 tcg_gen_shli_i32(tmp2, tmp2, shift);
9462 tcg_gen_ext16u_i32(tmp, tmp);
9463 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9464 }
9465 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9466 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9467 store_reg(s, rd, tmp);
9468 } else {
2af9ab77
JB
9469 /* Data processing register constant shift. */
9470 if (rn == 15) {
7d1b0095 9471 tmp = tcg_temp_new_i32();
2af9ab77
JB
9472 tcg_gen_movi_i32(tmp, 0);
9473 } else {
9474 tmp = load_reg(s, rn);
9475 }
9476 tmp2 = load_reg(s, rm);
9477
9478 shiftop = (insn >> 4) & 3;
9479 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9480 conds = (insn & (1 << 20)) != 0;
9481 logic_cc = (conds && thumb2_logic_op(op));
9482 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9483 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9484 goto illegal_op;
7d1b0095 9485 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9486 if (rd != 15) {
9487 store_reg(s, rd, tmp);
9488 } else {
7d1b0095 9489 tcg_temp_free_i32(tmp);
2af9ab77 9490 }
3174f8e9 9491 }
9ee6e8bb
PB
9492 break;
9493 case 13: /* Misc data processing. */
9494 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9495 if (op < 4 && (insn & 0xf000) != 0xf000)
9496 goto illegal_op;
9497 switch (op) {
9498 case 0: /* Register controlled shift. */
8984bd2e
PB
9499 tmp = load_reg(s, rn);
9500 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9501 if ((insn & 0x70) != 0)
9502 goto illegal_op;
9503 op = (insn >> 21) & 3;
8984bd2e
PB
9504 logic_cc = (insn & (1 << 20)) != 0;
9505 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9506 if (logic_cc)
9507 gen_logic_CC(tmp);
7dcc1f89 9508 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9509 break;
9510 case 1: /* Sign/zero extend. */
5e3f878a 9511 tmp = load_reg(s, rm);
9ee6e8bb 9512 shift = (insn >> 4) & 3;
1301f322 9513 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9514 rotate, a shift is sufficient. */
9515 if (shift != 0)
f669df27 9516 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9517 op = (insn >> 20) & 7;
9518 switch (op) {
5e3f878a
PB
9519 case 0: gen_sxth(tmp); break;
9520 case 1: gen_uxth(tmp); break;
9521 case 2: gen_sxtb16(tmp); break;
9522 case 3: gen_uxtb16(tmp); break;
9523 case 4: gen_sxtb(tmp); break;
9524 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9525 default: goto illegal_op;
9526 }
9527 if (rn != 15) {
5e3f878a 9528 tmp2 = load_reg(s, rn);
9ee6e8bb 9529 if ((op >> 1) == 1) {
5e3f878a 9530 gen_add16(tmp, tmp2);
9ee6e8bb 9531 } else {
5e3f878a 9532 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9533 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9534 }
9535 }
5e3f878a 9536 store_reg(s, rd, tmp);
9ee6e8bb
PB
9537 break;
9538 case 2: /* SIMD add/subtract. */
9539 op = (insn >> 20) & 7;
9540 shift = (insn >> 4) & 7;
9541 if ((op & 3) == 3 || (shift & 3) == 3)
9542 goto illegal_op;
6ddbc6e4
PB
9543 tmp = load_reg(s, rn);
9544 tmp2 = load_reg(s, rm);
9545 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9546 tcg_temp_free_i32(tmp2);
6ddbc6e4 9547 store_reg(s, rd, tmp);
9ee6e8bb
PB
9548 break;
9549 case 3: /* Other data processing. */
9550 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9551 if (op < 4) {
9552 /* Saturating add/subtract. */
d9ba4830
PB
9553 tmp = load_reg(s, rn);
9554 tmp2 = load_reg(s, rm);
9ee6e8bb 9555 if (op & 1)
9ef39277 9556 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9557 if (op & 2)
9ef39277 9558 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9559 else
9ef39277 9560 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9561 tcg_temp_free_i32(tmp2);
9ee6e8bb 9562 } else {
d9ba4830 9563 tmp = load_reg(s, rn);
9ee6e8bb
PB
9564 switch (op) {
9565 case 0x0a: /* rbit */
d9ba4830 9566 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9567 break;
9568 case 0x08: /* rev */
66896cb8 9569 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9570 break;
9571 case 0x09: /* rev16 */
d9ba4830 9572 gen_rev16(tmp);
9ee6e8bb
PB
9573 break;
9574 case 0x0b: /* revsh */
d9ba4830 9575 gen_revsh(tmp);
9ee6e8bb
PB
9576 break;
9577 case 0x10: /* sel */
d9ba4830 9578 tmp2 = load_reg(s, rm);
7d1b0095 9579 tmp3 = tcg_temp_new_i32();
0ecb72a5 9580 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9581 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9582 tcg_temp_free_i32(tmp3);
9583 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9584 break;
9585 case 0x18: /* clz */
d9ba4830 9586 gen_helper_clz(tmp, tmp);
9ee6e8bb 9587 break;
eb0ecd5a
WN
9588 case 0x20:
9589 case 0x21:
9590 case 0x22:
9591 case 0x28:
9592 case 0x29:
9593 case 0x2a:
9594 {
9595 /* crc32/crc32c */
9596 uint32_t sz = op & 0x3;
9597 uint32_t c = op & 0x8;
9598
d614a513 9599 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
eb0ecd5a
WN
9600 goto illegal_op;
9601 }
9602
9603 tmp2 = load_reg(s, rm);
aa633469
PM
9604 if (sz == 0) {
9605 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9606 } else if (sz == 1) {
9607 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9608 }
eb0ecd5a
WN
9609 tmp3 = tcg_const_i32(1 << sz);
9610 if (c) {
9611 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9612 } else {
9613 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9614 }
9615 tcg_temp_free_i32(tmp2);
9616 tcg_temp_free_i32(tmp3);
9617 break;
9618 }
9ee6e8bb
PB
9619 default:
9620 goto illegal_op;
9621 }
9622 }
d9ba4830 9623 store_reg(s, rd, tmp);
9ee6e8bb
PB
9624 break;
9625 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9626 op = (insn >> 4) & 0xf;
d9ba4830
PB
9627 tmp = load_reg(s, rn);
9628 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9629 switch ((insn >> 20) & 7) {
9630 case 0: /* 32 x 32 -> 32 */
d9ba4830 9631 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9632 tcg_temp_free_i32(tmp2);
9ee6e8bb 9633 if (rs != 15) {
d9ba4830 9634 tmp2 = load_reg(s, rs);
9ee6e8bb 9635 if (op)
d9ba4830 9636 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9637 else
d9ba4830 9638 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9639 tcg_temp_free_i32(tmp2);
9ee6e8bb 9640 }
9ee6e8bb
PB
9641 break;
9642 case 1: /* 16 x 16 -> 32 */
d9ba4830 9643 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9644 tcg_temp_free_i32(tmp2);
9ee6e8bb 9645 if (rs != 15) {
d9ba4830 9646 tmp2 = load_reg(s, rs);
9ef39277 9647 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9648 tcg_temp_free_i32(tmp2);
9ee6e8bb 9649 }
9ee6e8bb
PB
9650 break;
9651 case 2: /* Dual multiply add. */
9652 case 4: /* Dual multiply subtract. */
9653 if (op)
d9ba4830
PB
9654 gen_swap_half(tmp2);
9655 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9656 if (insn & (1 << 22)) {
e1d177b9 9657 /* This subtraction cannot overflow. */
d9ba4830 9658 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9659 } else {
e1d177b9
PM
9660 /* This addition cannot overflow 32 bits;
9661 * however it may overflow considered as a signed
9662 * operation, in which case we must set the Q flag.
9663 */
9ef39277 9664 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9665 }
7d1b0095 9666 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9667 if (rs != 15)
9668 {
d9ba4830 9669 tmp2 = load_reg(s, rs);
9ef39277 9670 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9671 tcg_temp_free_i32(tmp2);
9ee6e8bb 9672 }
9ee6e8bb
PB
9673 break;
9674 case 3: /* 32 * 16 -> 32msb */
9675 if (op)
d9ba4830 9676 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9677 else
d9ba4830 9678 gen_sxth(tmp2);
a7812ae4
PB
9679 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9680 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9681 tmp = tcg_temp_new_i32();
a7812ae4 9682 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9683 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9684 if (rs != 15)
9685 {
d9ba4830 9686 tmp2 = load_reg(s, rs);
9ef39277 9687 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9688 tcg_temp_free_i32(tmp2);
9ee6e8bb 9689 }
9ee6e8bb 9690 break;
838fa72d
AJ
9691 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9692 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9693 if (rs != 15) {
838fa72d
AJ
9694 tmp = load_reg(s, rs);
9695 if (insn & (1 << 20)) {
9696 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9697 } else {
838fa72d 9698 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9699 }
2c0262af 9700 }
838fa72d
AJ
9701 if (insn & (1 << 4)) {
9702 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9703 }
9704 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9705 tmp = tcg_temp_new_i32();
838fa72d
AJ
9706 tcg_gen_trunc_i64_i32(tmp, tmp64);
9707 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9708 break;
9709 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9710 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9711 tcg_temp_free_i32(tmp2);
9ee6e8bb 9712 if (rs != 15) {
d9ba4830
PB
9713 tmp2 = load_reg(s, rs);
9714 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9715 tcg_temp_free_i32(tmp2);
5fd46862 9716 }
9ee6e8bb 9717 break;
2c0262af 9718 }
d9ba4830 9719 store_reg(s, rd, tmp);
2c0262af 9720 break;
9ee6e8bb
PB
9721 case 6: case 7: /* 64-bit multiply, Divide. */
9722 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9723 tmp = load_reg(s, rn);
9724 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9725 if ((op & 0x50) == 0x10) {
9726 /* sdiv, udiv */
d614a513 9727 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9728 goto illegal_op;
47789990 9729 }
9ee6e8bb 9730 if (op & 0x20)
5e3f878a 9731 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9732 else
5e3f878a 9733 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9734 tcg_temp_free_i32(tmp2);
5e3f878a 9735 store_reg(s, rd, tmp);
9ee6e8bb
PB
9736 } else if ((op & 0xe) == 0xc) {
9737 /* Dual multiply accumulate long. */
9738 if (op & 1)
5e3f878a
PB
9739 gen_swap_half(tmp2);
9740 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9741 if (op & 0x10) {
5e3f878a 9742 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9743 } else {
5e3f878a 9744 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9745 }
7d1b0095 9746 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9747 /* BUGFIX */
9748 tmp64 = tcg_temp_new_i64();
9749 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9750 tcg_temp_free_i32(tmp);
a7812ae4
PB
9751 gen_addq(s, tmp64, rs, rd);
9752 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9753 tcg_temp_free_i64(tmp64);
2c0262af 9754 } else {
9ee6e8bb
PB
9755 if (op & 0x20) {
9756 /* Unsigned 64-bit multiply */
a7812ae4 9757 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9758 } else {
9ee6e8bb
PB
9759 if (op & 8) {
9760 /* smlalxy */
5e3f878a 9761 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9762 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9763 tmp64 = tcg_temp_new_i64();
9764 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9765 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9766 } else {
9767 /* Signed 64-bit multiply */
a7812ae4 9768 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9769 }
b5ff1b31 9770 }
9ee6e8bb
PB
9771 if (op & 4) {
9772 /* umaal */
a7812ae4
PB
9773 gen_addq_lo(s, tmp64, rs);
9774 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9775 } else if (op & 0x40) {
9776 /* 64-bit accumulate. */
a7812ae4 9777 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9778 }
a7812ae4 9779 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9780 tcg_temp_free_i64(tmp64);
5fd46862 9781 }
2c0262af 9782 break;
9ee6e8bb
PB
9783 }
9784 break;
9785 case 6: case 7: case 14: case 15:
9786 /* Coprocessor. */
9787 if (((insn >> 24) & 3) == 3) {
9788 /* Translate into the equivalent ARM encoding. */
f06053e3 9789 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 9790 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9791 goto illegal_op;
7dcc1f89 9792 }
6a57f3eb 9793 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 9794 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9795 goto illegal_op;
9796 }
9ee6e8bb
PB
9797 } else {
9798 if (insn & (1 << 28))
9799 goto illegal_op;
7dcc1f89 9800 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 9801 goto illegal_op;
7dcc1f89 9802 }
9ee6e8bb
PB
9803 }
9804 break;
9805 case 8: case 9: case 10: case 11:
9806 if (insn & (1 << 15)) {
9807 /* Branches, misc control. */
9808 if (insn & 0x5000) {
9809 /* Unconditional branch. */
9810 /* signextend(hw1[10:0]) -> offset[:12]. */
9811 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9812 /* hw1[10:0] -> offset[11:1]. */
9813 offset |= (insn & 0x7ff) << 1;
9814 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9815 offset[24:22] already have the same value because of the
9816 sign extension above. */
9817 offset ^= ((~insn) & (1 << 13)) << 10;
9818 offset ^= ((~insn) & (1 << 11)) << 11;
9819
9ee6e8bb
PB
9820 if (insn & (1 << 14)) {
9821 /* Branch and link. */
3174f8e9 9822 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9823 }
3b46e624 9824
b0109805 9825 offset += s->pc;
9ee6e8bb
PB
9826 if (insn & (1 << 12)) {
9827 /* b/bl */
b0109805 9828 gen_jmp(s, offset);
9ee6e8bb
PB
9829 } else {
9830 /* blx */
b0109805 9831 offset &= ~(uint32_t)2;
be5e7a76 9832 /* thumb2 bx, no need to check */
b0109805 9833 gen_bx_im(s, offset);
2c0262af 9834 }
9ee6e8bb
PB
9835 } else if (((insn >> 23) & 7) == 7) {
9836 /* Misc control */
9837 if (insn & (1 << 13))
9838 goto illegal_op;
9839
9840 if (insn & (1 << 26)) {
37e6456e
PM
9841 if (!(insn & (1 << 20))) {
9842 /* Hypervisor call (v7) */
9843 int imm16 = extract32(insn, 16, 4) << 12
9844 | extract32(insn, 0, 12);
9845 ARCH(7);
9846 if (IS_USER(s)) {
9847 goto illegal_op;
9848 }
9849 gen_hvc(s, imm16);
9850 } else {
9851 /* Secure monitor call (v6+) */
9852 ARCH(6K);
9853 if (IS_USER(s)) {
9854 goto illegal_op;
9855 }
9856 gen_smc(s);
9857 }
2c0262af 9858 } else {
9ee6e8bb
PB
9859 op = (insn >> 20) & 7;
9860 switch (op) {
9861 case 0: /* msr cpsr. */
b53d8923 9862 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9863 tmp = load_reg(s, rn);
9864 addr = tcg_const_i32(insn & 0xff);
9865 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9866 tcg_temp_free_i32(addr);
7d1b0095 9867 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9868 gen_lookup_tb(s);
9869 break;
9870 }
9871 /* fall through */
9872 case 1: /* msr spsr. */
b53d8923 9873 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9874 goto illegal_op;
b53d8923 9875 }
2fbac54b
FN
9876 tmp = load_reg(s, rn);
9877 if (gen_set_psr(s,
7dcc1f89 9878 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9879 op == 1, tmp))
9ee6e8bb
PB
9880 goto illegal_op;
9881 break;
9882 case 2: /* cps, nop-hint. */
9883 if (((insn >> 8) & 7) == 0) {
9884 gen_nop_hint(s, insn & 0xff);
9885 }
9886 /* Implemented as NOP in user mode. */
9887 if (IS_USER(s))
9888 break;
9889 offset = 0;
9890 imm = 0;
9891 if (insn & (1 << 10)) {
9892 if (insn & (1 << 7))
9893 offset |= CPSR_A;
9894 if (insn & (1 << 6))
9895 offset |= CPSR_I;
9896 if (insn & (1 << 5))
9897 offset |= CPSR_F;
9898 if (insn & (1 << 9))
9899 imm = CPSR_A | CPSR_I | CPSR_F;
9900 }
9901 if (insn & (1 << 8)) {
9902 offset |= 0x1f;
9903 imm |= (insn & 0x1f);
9904 }
9905 if (offset) {
2fbac54b 9906 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9907 }
9908 break;
9909 case 3: /* Special control operations. */
426f5abc 9910 ARCH(7);
9ee6e8bb
PB
9911 op = (insn >> 4) & 0xf;
9912 switch (op) {
9913 case 2: /* clrex */
426f5abc 9914 gen_clrex(s);
9ee6e8bb
PB
9915 break;
9916 case 4: /* dsb */
9917 case 5: /* dmb */
9918 case 6: /* isb */
9919 /* These execute as NOPs. */
9ee6e8bb
PB
9920 break;
9921 default:
9922 goto illegal_op;
9923 }
9924 break;
9925 case 4: /* bxj */
9926 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9927 tmp = load_reg(s, rn);
9928 gen_bx(s, tmp);
9ee6e8bb
PB
9929 break;
9930 case 5: /* Exception return. */
b8b45b68
RV
9931 if (IS_USER(s)) {
9932 goto illegal_op;
9933 }
9934 if (rn != 14 || rd != 15) {
9935 goto illegal_op;
9936 }
9937 tmp = load_reg(s, rn);
9938 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9939 gen_exception_return(s, tmp);
9940 break;
9ee6e8bb 9941 case 6: /* mrs cpsr. */
7d1b0095 9942 tmp = tcg_temp_new_i32();
b53d8923 9943 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9944 addr = tcg_const_i32(insn & 0xff);
9945 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9946 tcg_temp_free_i32(addr);
9ee6e8bb 9947 } else {
9ef39277 9948 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9949 }
8984bd2e 9950 store_reg(s, rd, tmp);
9ee6e8bb
PB
9951 break;
9952 case 7: /* mrs spsr. */
9953 /* Not accessible in user mode. */
b53d8923 9954 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9955 goto illegal_op;
b53d8923 9956 }
d9ba4830
PB
9957 tmp = load_cpu_field(spsr);
9958 store_reg(s, rd, tmp);
9ee6e8bb 9959 break;
2c0262af
FB
9960 }
9961 }
9ee6e8bb
PB
9962 } else {
9963 /* Conditional branch. */
9964 op = (insn >> 22) & 0xf;
9965 /* Generate a conditional jump to next instruction. */
9966 s->condlabel = gen_new_label();
39fb730a 9967 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9968 s->condjmp = 1;
9969
9970 /* offset[11:1] = insn[10:0] */
9971 offset = (insn & 0x7ff) << 1;
9972 /* offset[17:12] = insn[21:16]. */
9973 offset |= (insn & 0x003f0000) >> 4;
9974 /* offset[31:20] = insn[26]. */
9975 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9976 /* offset[18] = insn[13]. */
9977 offset |= (insn & (1 << 13)) << 5;
9978 /* offset[19] = insn[11]. */
9979 offset |= (insn & (1 << 11)) << 8;
9980
9981 /* jump to the offset */
b0109805 9982 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9983 }
9984 } else {
9985 /* Data processing immediate. */
9986 if (insn & (1 << 25)) {
9987 if (insn & (1 << 24)) {
9988 if (insn & (1 << 20))
9989 goto illegal_op;
9990 /* Bitfield/Saturate. */
9991 op = (insn >> 21) & 7;
9992 imm = insn & 0x1f;
9993 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9994 if (rn == 15) {
7d1b0095 9995 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9996 tcg_gen_movi_i32(tmp, 0);
9997 } else {
9998 tmp = load_reg(s, rn);
9999 }
9ee6e8bb
PB
10000 switch (op) {
10001 case 2: /* Signed bitfield extract. */
10002 imm++;
10003 if (shift + imm > 32)
10004 goto illegal_op;
10005 if (imm < 32)
6ddbc6e4 10006 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10007 break;
10008 case 6: /* Unsigned bitfield extract. */
10009 imm++;
10010 if (shift + imm > 32)
10011 goto illegal_op;
10012 if (imm < 32)
6ddbc6e4 10013 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10014 break;
10015 case 3: /* Bitfield insert/clear. */
10016 if (imm < shift)
10017 goto illegal_op;
10018 imm = imm + 1 - shift;
10019 if (imm != 32) {
6ddbc6e4 10020 tmp2 = load_reg(s, rd);
d593c48e 10021 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10022 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10023 }
10024 break;
10025 case 7:
10026 goto illegal_op;
10027 default: /* Saturate. */
9ee6e8bb
PB
10028 if (shift) {
10029 if (op & 1)
6ddbc6e4 10030 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10031 else
6ddbc6e4 10032 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10033 }
6ddbc6e4 10034 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10035 if (op & 4) {
10036 /* Unsigned. */
9ee6e8bb 10037 if ((op & 1) && shift == 0)
9ef39277 10038 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10039 else
9ef39277 10040 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 10041 } else {
9ee6e8bb 10042 /* Signed. */
9ee6e8bb 10043 if ((op & 1) && shift == 0)
9ef39277 10044 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10045 else
9ef39277 10046 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 10047 }
b75263d6 10048 tcg_temp_free_i32(tmp2);
9ee6e8bb 10049 break;
2c0262af 10050 }
6ddbc6e4 10051 store_reg(s, rd, tmp);
9ee6e8bb
PB
10052 } else {
10053 imm = ((insn & 0x04000000) >> 15)
10054 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10055 if (insn & (1 << 22)) {
10056 /* 16-bit immediate. */
10057 imm |= (insn >> 4) & 0xf000;
10058 if (insn & (1 << 23)) {
10059 /* movt */
5e3f878a 10060 tmp = load_reg(s, rd);
86831435 10061 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10062 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10063 } else {
9ee6e8bb 10064 /* movw */
7d1b0095 10065 tmp = tcg_temp_new_i32();
5e3f878a 10066 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10067 }
10068 } else {
9ee6e8bb
PB
10069 /* Add/sub 12-bit immediate. */
10070 if (rn == 15) {
b0109805 10071 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10072 if (insn & (1 << 23))
b0109805 10073 offset -= imm;
9ee6e8bb 10074 else
b0109805 10075 offset += imm;
7d1b0095 10076 tmp = tcg_temp_new_i32();
5e3f878a 10077 tcg_gen_movi_i32(tmp, offset);
2c0262af 10078 } else {
5e3f878a 10079 tmp = load_reg(s, rn);
9ee6e8bb 10080 if (insn & (1 << 23))
5e3f878a 10081 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10082 else
5e3f878a 10083 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10084 }
9ee6e8bb 10085 }
5e3f878a 10086 store_reg(s, rd, tmp);
191abaa2 10087 }
9ee6e8bb
PB
10088 } else {
10089 int shifter_out = 0;
10090 /* modified 12-bit immediate. */
10091 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10092 imm = (insn & 0xff);
10093 switch (shift) {
10094 case 0: /* XY */
10095 /* Nothing to do. */
10096 break;
10097 case 1: /* 00XY00XY */
10098 imm |= imm << 16;
10099 break;
10100 case 2: /* XY00XY00 */
10101 imm |= imm << 16;
10102 imm <<= 8;
10103 break;
10104 case 3: /* XYXYXYXY */
10105 imm |= imm << 16;
10106 imm |= imm << 8;
10107 break;
10108 default: /* Rotated constant. */
10109 shift = (shift << 1) | (imm >> 7);
10110 imm |= 0x80;
10111 imm = imm << (32 - shift);
10112 shifter_out = 1;
10113 break;
b5ff1b31 10114 }
7d1b0095 10115 tmp2 = tcg_temp_new_i32();
3174f8e9 10116 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10117 rn = (insn >> 16) & 0xf;
3174f8e9 10118 if (rn == 15) {
7d1b0095 10119 tmp = tcg_temp_new_i32();
3174f8e9
FN
10120 tcg_gen_movi_i32(tmp, 0);
10121 } else {
10122 tmp = load_reg(s, rn);
10123 }
9ee6e8bb
PB
10124 op = (insn >> 21) & 0xf;
10125 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10126 shifter_out, tmp, tmp2))
9ee6e8bb 10127 goto illegal_op;
7d1b0095 10128 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10129 rd = (insn >> 8) & 0xf;
10130 if (rd != 15) {
3174f8e9
FN
10131 store_reg(s, rd, tmp);
10132 } else {
7d1b0095 10133 tcg_temp_free_i32(tmp);
2c0262af 10134 }
2c0262af 10135 }
9ee6e8bb
PB
10136 }
10137 break;
10138 case 12: /* Load/store single data item. */
10139 {
10140 int postinc = 0;
10141 int writeback = 0;
a99caa48 10142 int memidx;
9ee6e8bb 10143 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10144 if (disas_neon_ls_insn(s, insn)) {
c1713132 10145 goto illegal_op;
7dcc1f89 10146 }
9ee6e8bb
PB
10147 break;
10148 }
a2fdc890
PM
10149 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10150 if (rs == 15) {
10151 if (!(insn & (1 << 20))) {
10152 goto illegal_op;
10153 }
10154 if (op != 2) {
10155 /* Byte or halfword load space with dest == r15 : memory hints.
10156 * Catch them early so we don't emit pointless addressing code.
10157 * This space is a mix of:
10158 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10159 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10160 * cores)
10161 * unallocated hints, which must be treated as NOPs
10162 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10163 * which is easiest for the decoding logic
10164 * Some space which must UNDEF
10165 */
10166 int op1 = (insn >> 23) & 3;
10167 int op2 = (insn >> 6) & 0x3f;
10168 if (op & 2) {
10169 goto illegal_op;
10170 }
10171 if (rn == 15) {
02afbf64
PM
10172 /* UNPREDICTABLE, unallocated hint or
10173 * PLD/PLDW/PLI (literal)
10174 */
a2fdc890
PM
10175 return 0;
10176 }
10177 if (op1 & 1) {
02afbf64 10178 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10179 }
10180 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10181 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10182 }
10183 /* UNDEF space, or an UNPREDICTABLE */
10184 return 1;
10185 }
10186 }
a99caa48 10187 memidx = get_mem_index(s);
9ee6e8bb 10188 if (rn == 15) {
7d1b0095 10189 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10190 /* PC relative. */
10191 /* s->pc has already been incremented by 4. */
10192 imm = s->pc & 0xfffffffc;
10193 if (insn & (1 << 23))
10194 imm += insn & 0xfff;
10195 else
10196 imm -= insn & 0xfff;
b0109805 10197 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10198 } else {
b0109805 10199 addr = load_reg(s, rn);
9ee6e8bb
PB
10200 if (insn & (1 << 23)) {
10201 /* Positive offset. */
10202 imm = insn & 0xfff;
b0109805 10203 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10204 } else {
9ee6e8bb 10205 imm = insn & 0xff;
2a0308c5
PM
10206 switch ((insn >> 8) & 0xf) {
10207 case 0x0: /* Shifted Register. */
9ee6e8bb 10208 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10209 if (shift > 3) {
10210 tcg_temp_free_i32(addr);
18c9b560 10211 goto illegal_op;
2a0308c5 10212 }
b26eefb6 10213 tmp = load_reg(s, rm);
9ee6e8bb 10214 if (shift)
b26eefb6 10215 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10216 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10217 tcg_temp_free_i32(tmp);
9ee6e8bb 10218 break;
2a0308c5 10219 case 0xc: /* Negative offset. */
b0109805 10220 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10221 break;
2a0308c5 10222 case 0xe: /* User privilege. */
b0109805 10223 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10224 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10225 break;
2a0308c5 10226 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10227 imm = -imm;
10228 /* Fall through. */
2a0308c5 10229 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10230 postinc = 1;
10231 writeback = 1;
10232 break;
2a0308c5 10233 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10234 imm = -imm;
10235 /* Fall through. */
2a0308c5 10236 case 0xf: /* Pre-increment. */
b0109805 10237 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10238 writeback = 1;
10239 break;
10240 default:
2a0308c5 10241 tcg_temp_free_i32(addr);
b7bcbe95 10242 goto illegal_op;
9ee6e8bb
PB
10243 }
10244 }
10245 }
9ee6e8bb
PB
10246 if (insn & (1 << 20)) {
10247 /* Load. */
5a839c0d 10248 tmp = tcg_temp_new_i32();
a2fdc890 10249 switch (op) {
5a839c0d 10250 case 0:
a99caa48 10251 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10252 break;
10253 case 4:
a99caa48 10254 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10255 break;
10256 case 1:
a99caa48 10257 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10258 break;
10259 case 5:
a99caa48 10260 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10261 break;
10262 case 2:
a99caa48 10263 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10264 break;
2a0308c5 10265 default:
5a839c0d 10266 tcg_temp_free_i32(tmp);
2a0308c5
PM
10267 tcg_temp_free_i32(addr);
10268 goto illegal_op;
a2fdc890
PM
10269 }
10270 if (rs == 15) {
10271 gen_bx(s, tmp);
9ee6e8bb 10272 } else {
a2fdc890 10273 store_reg(s, rs, tmp);
9ee6e8bb
PB
10274 }
10275 } else {
10276 /* Store. */
b0109805 10277 tmp = load_reg(s, rs);
9ee6e8bb 10278 switch (op) {
5a839c0d 10279 case 0:
a99caa48 10280 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10281 break;
10282 case 1:
a99caa48 10283 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10284 break;
10285 case 2:
a99caa48 10286 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10287 break;
2a0308c5 10288 default:
5a839c0d 10289 tcg_temp_free_i32(tmp);
2a0308c5
PM
10290 tcg_temp_free_i32(addr);
10291 goto illegal_op;
b7bcbe95 10292 }
5a839c0d 10293 tcg_temp_free_i32(tmp);
2c0262af 10294 }
9ee6e8bb 10295 if (postinc)
b0109805
PB
10296 tcg_gen_addi_i32(addr, addr, imm);
10297 if (writeback) {
10298 store_reg(s, rn, addr);
10299 } else {
7d1b0095 10300 tcg_temp_free_i32(addr);
b0109805 10301 }
9ee6e8bb
PB
10302 }
10303 break;
10304 default:
10305 goto illegal_op;
2c0262af 10306 }
9ee6e8bb
PB
10307 return 0;
10308illegal_op:
10309 return 1;
2c0262af
FB
10310}
10311
0ecb72a5 10312static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10313{
10314 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10315 int32_t offset;
10316 int i;
39d5492a
PM
10317 TCGv_i32 tmp;
10318 TCGv_i32 tmp2;
10319 TCGv_i32 addr;
99c475ab 10320
9ee6e8bb
PB
10321 if (s->condexec_mask) {
10322 cond = s->condexec_cond;
bedd2912
JB
10323 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10324 s->condlabel = gen_new_label();
39fb730a 10325 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10326 s->condjmp = 1;
10327 }
9ee6e8bb
PB
10328 }
10329
d31dd73e 10330 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10331 s->pc += 2;
b5ff1b31 10332
99c475ab
FB
10333 switch (insn >> 12) {
10334 case 0: case 1:
396e467c 10335
99c475ab
FB
10336 rd = insn & 7;
10337 op = (insn >> 11) & 3;
10338 if (op == 3) {
10339 /* add/subtract */
10340 rn = (insn >> 3) & 7;
396e467c 10341 tmp = load_reg(s, rn);
99c475ab
FB
10342 if (insn & (1 << 10)) {
10343 /* immediate */
7d1b0095 10344 tmp2 = tcg_temp_new_i32();
396e467c 10345 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10346 } else {
10347 /* reg */
10348 rm = (insn >> 6) & 7;
396e467c 10349 tmp2 = load_reg(s, rm);
99c475ab 10350 }
9ee6e8bb
PB
10351 if (insn & (1 << 9)) {
10352 if (s->condexec_mask)
396e467c 10353 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10354 else
72485ec4 10355 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10356 } else {
10357 if (s->condexec_mask)
396e467c 10358 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10359 else
72485ec4 10360 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10361 }
7d1b0095 10362 tcg_temp_free_i32(tmp2);
396e467c 10363 store_reg(s, rd, tmp);
99c475ab
FB
10364 } else {
10365 /* shift immediate */
10366 rm = (insn >> 3) & 7;
10367 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10368 tmp = load_reg(s, rm);
10369 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10370 if (!s->condexec_mask)
10371 gen_logic_CC(tmp);
10372 store_reg(s, rd, tmp);
99c475ab
FB
10373 }
10374 break;
10375 case 2: case 3:
10376 /* arithmetic large immediate */
10377 op = (insn >> 11) & 3;
10378 rd = (insn >> 8) & 0x7;
396e467c 10379 if (op == 0) { /* mov */
7d1b0095 10380 tmp = tcg_temp_new_i32();
396e467c 10381 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10382 if (!s->condexec_mask)
396e467c
FN
10383 gen_logic_CC(tmp);
10384 store_reg(s, rd, tmp);
10385 } else {
10386 tmp = load_reg(s, rd);
7d1b0095 10387 tmp2 = tcg_temp_new_i32();
396e467c
FN
10388 tcg_gen_movi_i32(tmp2, insn & 0xff);
10389 switch (op) {
10390 case 1: /* cmp */
72485ec4 10391 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10392 tcg_temp_free_i32(tmp);
10393 tcg_temp_free_i32(tmp2);
396e467c
FN
10394 break;
10395 case 2: /* add */
10396 if (s->condexec_mask)
10397 tcg_gen_add_i32(tmp, tmp, tmp2);
10398 else
72485ec4 10399 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10400 tcg_temp_free_i32(tmp2);
396e467c
FN
10401 store_reg(s, rd, tmp);
10402 break;
10403 case 3: /* sub */
10404 if (s->condexec_mask)
10405 tcg_gen_sub_i32(tmp, tmp, tmp2);
10406 else
72485ec4 10407 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10408 tcg_temp_free_i32(tmp2);
396e467c
FN
10409 store_reg(s, rd, tmp);
10410 break;
10411 }
99c475ab 10412 }
99c475ab
FB
10413 break;
10414 case 4:
10415 if (insn & (1 << 11)) {
10416 rd = (insn >> 8) & 7;
5899f386
FB
10417 /* load pc-relative. Bit 1 of PC is ignored. */
10418 val = s->pc + 2 + ((insn & 0xff) * 4);
10419 val &= ~(uint32_t)2;
7d1b0095 10420 addr = tcg_temp_new_i32();
b0109805 10421 tcg_gen_movi_i32(addr, val);
c40c8556 10422 tmp = tcg_temp_new_i32();
6ce2faf4 10423 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10424 tcg_temp_free_i32(addr);
b0109805 10425 store_reg(s, rd, tmp);
99c475ab
FB
10426 break;
10427 }
10428 if (insn & (1 << 10)) {
10429 /* data processing extended or blx */
10430 rd = (insn & 7) | ((insn >> 4) & 8);
10431 rm = (insn >> 3) & 0xf;
10432 op = (insn >> 8) & 3;
10433 switch (op) {
10434 case 0: /* add */
396e467c
FN
10435 tmp = load_reg(s, rd);
10436 tmp2 = load_reg(s, rm);
10437 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10438 tcg_temp_free_i32(tmp2);
396e467c 10439 store_reg(s, rd, tmp);
99c475ab
FB
10440 break;
10441 case 1: /* cmp */
396e467c
FN
10442 tmp = load_reg(s, rd);
10443 tmp2 = load_reg(s, rm);
72485ec4 10444 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10445 tcg_temp_free_i32(tmp2);
10446 tcg_temp_free_i32(tmp);
99c475ab
FB
10447 break;
10448 case 2: /* mov/cpy */
396e467c
FN
10449 tmp = load_reg(s, rm);
10450 store_reg(s, rd, tmp);
99c475ab
FB
10451 break;
10452 case 3:/* branch [and link] exchange thumb register */
b0109805 10453 tmp = load_reg(s, rm);
99c475ab 10454 if (insn & (1 << 7)) {
be5e7a76 10455 ARCH(5);
99c475ab 10456 val = (uint32_t)s->pc | 1;
7d1b0095 10457 tmp2 = tcg_temp_new_i32();
b0109805
PB
10458 tcg_gen_movi_i32(tmp2, val);
10459 store_reg(s, 14, tmp2);
99c475ab 10460 }
be5e7a76 10461 /* already thumb, no need to check */
d9ba4830 10462 gen_bx(s, tmp);
99c475ab
FB
10463 break;
10464 }
10465 break;
10466 }
10467
10468 /* data processing register */
10469 rd = insn & 7;
10470 rm = (insn >> 3) & 7;
10471 op = (insn >> 6) & 0xf;
10472 if (op == 2 || op == 3 || op == 4 || op == 7) {
10473 /* the shift/rotate ops want the operands backwards */
10474 val = rm;
10475 rm = rd;
10476 rd = val;
10477 val = 1;
10478 } else {
10479 val = 0;
10480 }
10481
396e467c 10482 if (op == 9) { /* neg */
7d1b0095 10483 tmp = tcg_temp_new_i32();
396e467c
FN
10484 tcg_gen_movi_i32(tmp, 0);
10485 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10486 tmp = load_reg(s, rd);
10487 } else {
39d5492a 10488 TCGV_UNUSED_I32(tmp);
396e467c 10489 }
99c475ab 10490
396e467c 10491 tmp2 = load_reg(s, rm);
5899f386 10492 switch (op) {
99c475ab 10493 case 0x0: /* and */
396e467c 10494 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10495 if (!s->condexec_mask)
396e467c 10496 gen_logic_CC(tmp);
99c475ab
FB
10497 break;
10498 case 0x1: /* eor */
396e467c 10499 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10500 if (!s->condexec_mask)
396e467c 10501 gen_logic_CC(tmp);
99c475ab
FB
10502 break;
10503 case 0x2: /* lsl */
9ee6e8bb 10504 if (s->condexec_mask) {
365af80e 10505 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10506 } else {
9ef39277 10507 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10508 gen_logic_CC(tmp2);
9ee6e8bb 10509 }
99c475ab
FB
10510 break;
10511 case 0x3: /* lsr */
9ee6e8bb 10512 if (s->condexec_mask) {
365af80e 10513 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10514 } else {
9ef39277 10515 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10516 gen_logic_CC(tmp2);
9ee6e8bb 10517 }
99c475ab
FB
10518 break;
10519 case 0x4: /* asr */
9ee6e8bb 10520 if (s->condexec_mask) {
365af80e 10521 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10522 } else {
9ef39277 10523 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10524 gen_logic_CC(tmp2);
9ee6e8bb 10525 }
99c475ab
FB
10526 break;
10527 case 0x5: /* adc */
49b4c31e 10528 if (s->condexec_mask) {
396e467c 10529 gen_adc(tmp, tmp2);
49b4c31e
RH
10530 } else {
10531 gen_adc_CC(tmp, tmp, tmp2);
10532 }
99c475ab
FB
10533 break;
10534 case 0x6: /* sbc */
2de68a49 10535 if (s->condexec_mask) {
396e467c 10536 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10537 } else {
10538 gen_sbc_CC(tmp, tmp, tmp2);
10539 }
99c475ab
FB
10540 break;
10541 case 0x7: /* ror */
9ee6e8bb 10542 if (s->condexec_mask) {
f669df27
AJ
10543 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10544 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10545 } else {
9ef39277 10546 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10547 gen_logic_CC(tmp2);
9ee6e8bb 10548 }
99c475ab
FB
10549 break;
10550 case 0x8: /* tst */
396e467c
FN
10551 tcg_gen_and_i32(tmp, tmp, tmp2);
10552 gen_logic_CC(tmp);
99c475ab 10553 rd = 16;
5899f386 10554 break;
99c475ab 10555 case 0x9: /* neg */
9ee6e8bb 10556 if (s->condexec_mask)
396e467c 10557 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10558 else
72485ec4 10559 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10560 break;
10561 case 0xa: /* cmp */
72485ec4 10562 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10563 rd = 16;
10564 break;
10565 case 0xb: /* cmn */
72485ec4 10566 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10567 rd = 16;
10568 break;
10569 case 0xc: /* orr */
396e467c 10570 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10571 if (!s->condexec_mask)
396e467c 10572 gen_logic_CC(tmp);
99c475ab
FB
10573 break;
10574 case 0xd: /* mul */
7b2919a0 10575 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10576 if (!s->condexec_mask)
396e467c 10577 gen_logic_CC(tmp);
99c475ab
FB
10578 break;
10579 case 0xe: /* bic */
f669df27 10580 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10581 if (!s->condexec_mask)
396e467c 10582 gen_logic_CC(tmp);
99c475ab
FB
10583 break;
10584 case 0xf: /* mvn */
396e467c 10585 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10586 if (!s->condexec_mask)
396e467c 10587 gen_logic_CC(tmp2);
99c475ab 10588 val = 1;
5899f386 10589 rm = rd;
99c475ab
FB
10590 break;
10591 }
10592 if (rd != 16) {
396e467c
FN
10593 if (val) {
10594 store_reg(s, rm, tmp2);
10595 if (op != 0xf)
7d1b0095 10596 tcg_temp_free_i32(tmp);
396e467c
FN
10597 } else {
10598 store_reg(s, rd, tmp);
7d1b0095 10599 tcg_temp_free_i32(tmp2);
396e467c
FN
10600 }
10601 } else {
7d1b0095
PM
10602 tcg_temp_free_i32(tmp);
10603 tcg_temp_free_i32(tmp2);
99c475ab
FB
10604 }
10605 break;
10606
10607 case 5:
10608 /* load/store register offset. */
10609 rd = insn & 7;
10610 rn = (insn >> 3) & 7;
10611 rm = (insn >> 6) & 7;
10612 op = (insn >> 9) & 7;
b0109805 10613 addr = load_reg(s, rn);
b26eefb6 10614 tmp = load_reg(s, rm);
b0109805 10615 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10616 tcg_temp_free_i32(tmp);
99c475ab 10617
c40c8556 10618 if (op < 3) { /* store */
b0109805 10619 tmp = load_reg(s, rd);
c40c8556
PM
10620 } else {
10621 tmp = tcg_temp_new_i32();
10622 }
99c475ab
FB
10623
10624 switch (op) {
10625 case 0: /* str */
6ce2faf4 10626 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10627 break;
10628 case 1: /* strh */
6ce2faf4 10629 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10630 break;
10631 case 2: /* strb */
6ce2faf4 10632 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10633 break;
10634 case 3: /* ldrsb */
6ce2faf4 10635 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10636 break;
10637 case 4: /* ldr */
6ce2faf4 10638 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10639 break;
10640 case 5: /* ldrh */
6ce2faf4 10641 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10642 break;
10643 case 6: /* ldrb */
6ce2faf4 10644 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10645 break;
10646 case 7: /* ldrsh */
6ce2faf4 10647 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10648 break;
10649 }
c40c8556 10650 if (op >= 3) { /* load */
b0109805 10651 store_reg(s, rd, tmp);
c40c8556
PM
10652 } else {
10653 tcg_temp_free_i32(tmp);
10654 }
7d1b0095 10655 tcg_temp_free_i32(addr);
99c475ab
FB
10656 break;
10657
10658 case 6:
10659 /* load/store word immediate offset */
10660 rd = insn & 7;
10661 rn = (insn >> 3) & 7;
b0109805 10662 addr = load_reg(s, rn);
99c475ab 10663 val = (insn >> 4) & 0x7c;
b0109805 10664 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10665
10666 if (insn & (1 << 11)) {
10667 /* load */
c40c8556 10668 tmp = tcg_temp_new_i32();
6ce2faf4 10669 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10670 store_reg(s, rd, tmp);
99c475ab
FB
10671 } else {
10672 /* store */
b0109805 10673 tmp = load_reg(s, rd);
6ce2faf4 10674 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10675 tcg_temp_free_i32(tmp);
99c475ab 10676 }
7d1b0095 10677 tcg_temp_free_i32(addr);
99c475ab
FB
10678 break;
10679
10680 case 7:
10681 /* load/store byte immediate offset */
10682 rd = insn & 7;
10683 rn = (insn >> 3) & 7;
b0109805 10684 addr = load_reg(s, rn);
99c475ab 10685 val = (insn >> 6) & 0x1f;
b0109805 10686 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10687
10688 if (insn & (1 << 11)) {
10689 /* load */
c40c8556 10690 tmp = tcg_temp_new_i32();
6ce2faf4 10691 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10692 store_reg(s, rd, tmp);
99c475ab
FB
10693 } else {
10694 /* store */
b0109805 10695 tmp = load_reg(s, rd);
6ce2faf4 10696 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10697 tcg_temp_free_i32(tmp);
99c475ab 10698 }
7d1b0095 10699 tcg_temp_free_i32(addr);
99c475ab
FB
10700 break;
10701
10702 case 8:
10703 /* load/store halfword immediate offset */
10704 rd = insn & 7;
10705 rn = (insn >> 3) & 7;
b0109805 10706 addr = load_reg(s, rn);
99c475ab 10707 val = (insn >> 5) & 0x3e;
b0109805 10708 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10709
10710 if (insn & (1 << 11)) {
10711 /* load */
c40c8556 10712 tmp = tcg_temp_new_i32();
6ce2faf4 10713 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10714 store_reg(s, rd, tmp);
99c475ab
FB
10715 } else {
10716 /* store */
b0109805 10717 tmp = load_reg(s, rd);
6ce2faf4 10718 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10719 tcg_temp_free_i32(tmp);
99c475ab 10720 }
7d1b0095 10721 tcg_temp_free_i32(addr);
99c475ab
FB
10722 break;
10723
10724 case 9:
10725 /* load/store from stack */
10726 rd = (insn >> 8) & 7;
b0109805 10727 addr = load_reg(s, 13);
99c475ab 10728 val = (insn & 0xff) * 4;
b0109805 10729 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10730
10731 if (insn & (1 << 11)) {
10732 /* load */
c40c8556 10733 tmp = tcg_temp_new_i32();
6ce2faf4 10734 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10735 store_reg(s, rd, tmp);
99c475ab
FB
10736 } else {
10737 /* store */
b0109805 10738 tmp = load_reg(s, rd);
6ce2faf4 10739 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10740 tcg_temp_free_i32(tmp);
99c475ab 10741 }
7d1b0095 10742 tcg_temp_free_i32(addr);
99c475ab
FB
10743 break;
10744
10745 case 10:
10746 /* add to high reg */
10747 rd = (insn >> 8) & 7;
5899f386
FB
10748 if (insn & (1 << 11)) {
10749 /* SP */
5e3f878a 10750 tmp = load_reg(s, 13);
5899f386
FB
10751 } else {
10752 /* PC. bit 1 is ignored. */
7d1b0095 10753 tmp = tcg_temp_new_i32();
5e3f878a 10754 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10755 }
99c475ab 10756 val = (insn & 0xff) * 4;
5e3f878a
PB
10757 tcg_gen_addi_i32(tmp, tmp, val);
10758 store_reg(s, rd, tmp);
99c475ab
FB
10759 break;
10760
10761 case 11:
10762 /* misc */
10763 op = (insn >> 8) & 0xf;
10764 switch (op) {
10765 case 0:
10766 /* adjust stack pointer */
b26eefb6 10767 tmp = load_reg(s, 13);
99c475ab
FB
10768 val = (insn & 0x7f) * 4;
10769 if (insn & (1 << 7))
6a0d8a1d 10770 val = -(int32_t)val;
b26eefb6
PB
10771 tcg_gen_addi_i32(tmp, tmp, val);
10772 store_reg(s, 13, tmp);
99c475ab
FB
10773 break;
10774
9ee6e8bb
PB
10775 case 2: /* sign/zero extend. */
10776 ARCH(6);
10777 rd = insn & 7;
10778 rm = (insn >> 3) & 7;
b0109805 10779 tmp = load_reg(s, rm);
9ee6e8bb 10780 switch ((insn >> 6) & 3) {
b0109805
PB
10781 case 0: gen_sxth(tmp); break;
10782 case 1: gen_sxtb(tmp); break;
10783 case 2: gen_uxth(tmp); break;
10784 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10785 }
b0109805 10786 store_reg(s, rd, tmp);
9ee6e8bb 10787 break;
99c475ab
FB
10788 case 4: case 5: case 0xc: case 0xd:
10789 /* push/pop */
b0109805 10790 addr = load_reg(s, 13);
5899f386
FB
10791 if (insn & (1 << 8))
10792 offset = 4;
99c475ab 10793 else
5899f386
FB
10794 offset = 0;
10795 for (i = 0; i < 8; i++) {
10796 if (insn & (1 << i))
10797 offset += 4;
10798 }
10799 if ((insn & (1 << 11)) == 0) {
b0109805 10800 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10801 }
99c475ab
FB
10802 for (i = 0; i < 8; i++) {
10803 if (insn & (1 << i)) {
10804 if (insn & (1 << 11)) {
10805 /* pop */
c40c8556 10806 tmp = tcg_temp_new_i32();
6ce2faf4 10807 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10808 store_reg(s, i, tmp);
99c475ab
FB
10809 } else {
10810 /* push */
b0109805 10811 tmp = load_reg(s, i);
6ce2faf4 10812 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10813 tcg_temp_free_i32(tmp);
99c475ab 10814 }
5899f386 10815 /* advance to the next address. */
b0109805 10816 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10817 }
10818 }
39d5492a 10819 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10820 if (insn & (1 << 8)) {
10821 if (insn & (1 << 11)) {
10822 /* pop pc */
c40c8556 10823 tmp = tcg_temp_new_i32();
6ce2faf4 10824 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10825 /* don't set the pc until the rest of the instruction
10826 has completed */
10827 } else {
10828 /* push lr */
b0109805 10829 tmp = load_reg(s, 14);
6ce2faf4 10830 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10831 tcg_temp_free_i32(tmp);
99c475ab 10832 }
b0109805 10833 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10834 }
5899f386 10835 if ((insn & (1 << 11)) == 0) {
b0109805 10836 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10837 }
99c475ab 10838 /* write back the new stack pointer */
b0109805 10839 store_reg(s, 13, addr);
99c475ab 10840 /* set the new PC value */
be5e7a76 10841 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 10842 store_reg_from_load(s, 15, tmp);
be5e7a76 10843 }
99c475ab
FB
10844 break;
10845
9ee6e8bb
PB
10846 case 1: case 3: case 9: case 11: /* czb */
10847 rm = insn & 7;
d9ba4830 10848 tmp = load_reg(s, rm);
9ee6e8bb
PB
10849 s->condlabel = gen_new_label();
10850 s->condjmp = 1;
10851 if (insn & (1 << 11))
cb63669a 10852 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10853 else
cb63669a 10854 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10855 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10856 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10857 val = (uint32_t)s->pc + 2;
10858 val += offset;
10859 gen_jmp(s, val);
10860 break;
10861
10862 case 15: /* IT, nop-hint. */
10863 if ((insn & 0xf) == 0) {
10864 gen_nop_hint(s, (insn >> 4) & 0xf);
10865 break;
10866 }
10867 /* If Then. */
10868 s->condexec_cond = (insn >> 4) & 0xe;
10869 s->condexec_mask = insn & 0x1f;
10870 /* No actual code generated for this insn, just setup state. */
10871 break;
10872
06c949e6 10873 case 0xe: /* bkpt */
d4a2dc67
PM
10874 {
10875 int imm8 = extract32(insn, 0, 8);
be5e7a76 10876 ARCH(5);
73710361
GB
10877 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
10878 default_exception_el(s));
06c949e6 10879 break;
d4a2dc67 10880 }
06c949e6 10881
9ee6e8bb
PB
10882 case 0xa: /* rev */
10883 ARCH(6);
10884 rn = (insn >> 3) & 0x7;
10885 rd = insn & 0x7;
b0109805 10886 tmp = load_reg(s, rn);
9ee6e8bb 10887 switch ((insn >> 6) & 3) {
66896cb8 10888 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10889 case 1: gen_rev16(tmp); break;
10890 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10891 default: goto illegal_op;
10892 }
b0109805 10893 store_reg(s, rd, tmp);
9ee6e8bb
PB
10894 break;
10895
d9e028c1
PM
10896 case 6:
10897 switch ((insn >> 5) & 7) {
10898 case 2:
10899 /* setend */
10900 ARCH(6);
10962fd5
PM
10901 if (((insn >> 3) & 1) != s->bswap_code) {
10902 /* Dynamic endianness switching not implemented. */
e0c270d9 10903 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10904 goto illegal_op;
10905 }
9ee6e8bb 10906 break;
d9e028c1
PM
10907 case 3:
10908 /* cps */
10909 ARCH(6);
10910 if (IS_USER(s)) {
10911 break;
8984bd2e 10912 }
b53d8923 10913 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
10914 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10915 /* FAULTMASK */
10916 if (insn & 1) {
10917 addr = tcg_const_i32(19);
10918 gen_helper_v7m_msr(cpu_env, addr, tmp);
10919 tcg_temp_free_i32(addr);
10920 }
10921 /* PRIMASK */
10922 if (insn & 2) {
10923 addr = tcg_const_i32(16);
10924 gen_helper_v7m_msr(cpu_env, addr, tmp);
10925 tcg_temp_free_i32(addr);
10926 }
10927 tcg_temp_free_i32(tmp);
10928 gen_lookup_tb(s);
10929 } else {
10930 if (insn & (1 << 4)) {
10931 shift = CPSR_A | CPSR_I | CPSR_F;
10932 } else {
10933 shift = 0;
10934 }
10935 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10936 }
d9e028c1
PM
10937 break;
10938 default:
10939 goto undef;
9ee6e8bb
PB
10940 }
10941 break;
10942
99c475ab
FB
10943 default:
10944 goto undef;
10945 }
10946 break;
10947
10948 case 12:
a7d3970d 10949 {
99c475ab 10950 /* load/store multiple */
39d5492a
PM
10951 TCGv_i32 loaded_var;
10952 TCGV_UNUSED_I32(loaded_var);
99c475ab 10953 rn = (insn >> 8) & 0x7;
b0109805 10954 addr = load_reg(s, rn);
99c475ab
FB
10955 for (i = 0; i < 8; i++) {
10956 if (insn & (1 << i)) {
99c475ab
FB
10957 if (insn & (1 << 11)) {
10958 /* load */
c40c8556 10959 tmp = tcg_temp_new_i32();
6ce2faf4 10960 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
10961 if (i == rn) {
10962 loaded_var = tmp;
10963 } else {
10964 store_reg(s, i, tmp);
10965 }
99c475ab
FB
10966 } else {
10967 /* store */
b0109805 10968 tmp = load_reg(s, i);
6ce2faf4 10969 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10970 tcg_temp_free_i32(tmp);
99c475ab 10971 }
5899f386 10972 /* advance to the next address */
b0109805 10973 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10974 }
10975 }
b0109805 10976 if ((insn & (1 << rn)) == 0) {
a7d3970d 10977 /* base reg not in list: base register writeback */
b0109805
PB
10978 store_reg(s, rn, addr);
10979 } else {
a7d3970d
PM
10980 /* base reg in list: if load, complete it now */
10981 if (insn & (1 << 11)) {
10982 store_reg(s, rn, loaded_var);
10983 }
7d1b0095 10984 tcg_temp_free_i32(addr);
b0109805 10985 }
99c475ab 10986 break;
a7d3970d 10987 }
99c475ab
FB
10988 case 13:
10989 /* conditional branch or swi */
10990 cond = (insn >> 8) & 0xf;
10991 if (cond == 0xe)
10992 goto undef;
10993
10994 if (cond == 0xf) {
10995 /* swi */
eaed129d 10996 gen_set_pc_im(s, s->pc);
d4a2dc67 10997 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10998 s->is_jmp = DISAS_SWI;
99c475ab
FB
10999 break;
11000 }
11001 /* generate a conditional jump to next instruction */
e50e6a20 11002 s->condlabel = gen_new_label();
39fb730a 11003 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11004 s->condjmp = 1;
99c475ab
FB
11005
11006 /* jump to the offset */
5899f386 11007 val = (uint32_t)s->pc + 2;
99c475ab 11008 offset = ((int32_t)insn << 24) >> 24;
5899f386 11009 val += offset << 1;
8aaca4c0 11010 gen_jmp(s, val);
99c475ab
FB
11011 break;
11012
11013 case 14:
358bf29e 11014 if (insn & (1 << 11)) {
9ee6e8bb
PB
11015 if (disas_thumb2_insn(env, s, insn))
11016 goto undef32;
358bf29e
PB
11017 break;
11018 }
9ee6e8bb 11019 /* unconditional branch */
99c475ab
FB
11020 val = (uint32_t)s->pc;
11021 offset = ((int32_t)insn << 21) >> 21;
11022 val += (offset << 1) + 2;
8aaca4c0 11023 gen_jmp(s, val);
99c475ab
FB
11024 break;
11025
11026 case 15:
9ee6e8bb 11027 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11028 goto undef32;
9ee6e8bb 11029 break;
99c475ab
FB
11030 }
11031 return;
9ee6e8bb 11032undef32:
73710361
GB
11033 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11034 default_exception_el(s));
9ee6e8bb
PB
11035 return;
11036illegal_op:
99c475ab 11037undef:
73710361
GB
11038 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11039 default_exception_el(s));
99c475ab
FB
11040}
11041
2c0262af
FB
11042/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
11043 basic block 'tb'. If search_pc is TRUE, also generate PC
11044 information for each intermediate instruction. */
5639c3f2 11045static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 11046 TranslationBlock *tb,
5639c3f2 11047 bool search_pc)
2c0262af 11048{
ed2803da 11049 CPUState *cs = CPU(cpu);
5639c3f2 11050 CPUARMState *env = &cpu->env;
2c0262af 11051 DisasContext dc1, *dc = &dc1;
a1d1bb31 11052 CPUBreakpoint *bp;
2c0262af 11053 int j, lj;
0fa85d43 11054 target_ulong pc_start;
0a2461fa 11055 target_ulong next_page_start;
2e70f6ef
PB
11056 int num_insns;
11057 int max_insns;
3b46e624 11058
2c0262af 11059 /* generate intermediate code */
40f860cd
PM
11060
11061 /* The A64 decoder has its own top level loop, because it doesn't need
11062 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11063 */
11064 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11065 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
11066 return;
11067 }
11068
0fa85d43 11069 pc_start = tb->pc;
3b46e624 11070
2c0262af
FB
11071 dc->tb = tb;
11072
2c0262af
FB
11073 dc->is_jmp = DISAS_NEXT;
11074 dc->pc = pc_start;
ed2803da 11075 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11076 dc->condjmp = 0;
3926cc84 11077
40f860cd 11078 dc->aarch64 = 0;
73710361 11079 dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
40f860cd
PM
11080 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11081 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11082 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11083 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
c1e37810
PM
11084 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11085 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11086#if !defined(CONFIG_USER_ONLY)
c1e37810 11087 dc->user = (dc->current_el == 0);
3926cc84 11088#endif
3f342b9e 11089 dc->ns = ARM_TBFLAG_NS(tb->flags);
9dbbc748 11090 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
40f860cd
PM
11091 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11092 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11093 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11094 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
60322b39 11095 dc->cp_regs = cpu->cp_regs;
a984e42c 11096 dc->features = env->features;
40f860cd 11097
50225ad0
PM
11098 /* Single step state. The code-generation logic here is:
11099 * SS_ACTIVE == 0:
11100 * generate code with no special handling for single-stepping (except
11101 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11102 * this happens anyway because those changes are all system register or
11103 * PSTATE writes).
11104 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11105 * emit code for one insn
11106 * emit code to clear PSTATE.SS
11107 * emit code to generate software step exception for completed step
11108 * end TB (as usual for having generated an exception)
11109 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11110 * emit code to generate a software step exception
11111 * end the TB
11112 */
11113 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11114 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11115 dc->is_ldex = false;
11116 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11117
a7812ae4
PB
11118 cpu_F0s = tcg_temp_new_i32();
11119 cpu_F1s = tcg_temp_new_i32();
11120 cpu_F0d = tcg_temp_new_i64();
11121 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11122 cpu_V0 = cpu_F0d;
11123 cpu_V1 = cpu_F1d;
e677137d 11124 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11125 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11126 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 11127 lj = -1;
2e70f6ef
PB
11128 num_insns = 0;
11129 max_insns = tb->cflags & CF_COUNT_MASK;
11130 if (max_insns == 0)
11131 max_insns = CF_COUNT_MASK;
11132
cd42d5b2 11133 gen_tb_start(tb);
e12ce78d 11134
3849902c
PM
11135 tcg_clear_temp_count();
11136
e12ce78d
PM
11137 /* A note on handling of the condexec (IT) bits:
11138 *
11139 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11140 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11141 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11142 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11143 * to do it at the end of the block. (For example if we don't do this
11144 * it's hard to identify whether we can safely skip writing condexec
11145 * at the end of the TB, which we definitely want to do for the case
11146 * where a TB doesn't do anything with the IT state at all.)
11147 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11148 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11149 * This is done both for leaving the TB at the end, and for leaving
11150 * it because of an exception we know will happen, which is done in
11151 * gen_exception_insn(). The latter is necessary because we need to
11152 * leave the TB with the PC/IT state just prior to execution of the
11153 * instruction which caused the exception.
11154 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11155 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
11156 * This is handled in the same way as restoration of the
11157 * PC in these situations: we will be called again with search_pc=1
11158 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
11159 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
11160 * this to restore the condexec bits.
e12ce78d
PM
11161 *
11162 * Note that there are no instructions which can read the condexec
11163 * bits, and none which can write non-static values to them, so
0ecb72a5 11164 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11165 * middle of a TB.
11166 */
11167
9ee6e8bb
PB
11168 /* Reset the conditional execution bits immediately. This avoids
11169 complications trying to do it at the end of the block. */
98eac7ca 11170 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11171 {
39d5492a 11172 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11173 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11174 store_cpu_field(tmp, condexec_bits);
8f01245e 11175 }
2c0262af 11176 do {
fbb4a2e3
PB
11177#ifdef CONFIG_USER_ONLY
11178 /* Intercept jump to the magic kernel page. */
40f860cd 11179 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11180 /* We always get here via a jump, so know we are not in a
11181 conditional execution block. */
d4a2dc67 11182 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
11183 dc->is_jmp = DISAS_UPDATE;
11184 break;
11185 }
11186#else
b53d8923 11187 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
9ee6e8bb
PB
11188 /* We always get here via a jump, so know we are not in a
11189 conditional execution block. */
d4a2dc67 11190 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
11191 dc->is_jmp = DISAS_UPDATE;
11192 break;
9ee6e8bb
PB
11193 }
11194#endif
11195
f0c3c505
AF
11196 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11197 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11198 if (bp->pc == dc->pc) {
d4a2dc67 11199 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
11200 /* Advance PC so that clearing the breakpoint will
11201 invalidate this TB. */
11202 dc->pc += 2;
11203 goto done_generating;
1fddef4b
FB
11204 }
11205 }
11206 }
2c0262af 11207 if (search_pc) {
fe700adb 11208 j = tcg_op_buf_count();
2c0262af
FB
11209 if (lj < j) {
11210 lj++;
11211 while (lj < j)
ab1103de 11212 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 11213 }
25983cad 11214 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 11215 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 11216 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 11217 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 11218 }
e50e6a20 11219
2e70f6ef
PB
11220 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11221 gen_io_start();
11222
fdefe51c 11223 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
11224 tcg_gen_debug_insn_start(dc->pc);
11225 }
11226
50225ad0
PM
11227 if (dc->ss_active && !dc->pstate_ss) {
11228 /* Singlestep state is Active-pending.
11229 * If we're in this state at the start of a TB then either
11230 * a) we just took an exception to an EL which is being debugged
11231 * and this is the first insn in the exception handler
11232 * b) debug exceptions were masked and we just unmasked them
11233 * without changing EL (eg by clearing PSTATE.D)
11234 * In either case we're going to take a swstep exception in the
11235 * "did not step an insn" case, and so the syndrome ISV and EX
11236 * bits should be zero.
11237 */
11238 assert(num_insns == 0);
73710361
GB
11239 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11240 default_exception_el(dc));
50225ad0
PM
11241 goto done_generating;
11242 }
11243
40f860cd 11244 if (dc->thumb) {
9ee6e8bb
PB
11245 disas_thumb_insn(env, dc);
11246 if (dc->condexec_mask) {
11247 dc->condexec_cond = (dc->condexec_cond & 0xe)
11248 | ((dc->condexec_mask >> 4) & 1);
11249 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11250 if (dc->condexec_mask == 0) {
11251 dc->condexec_cond = 0;
11252 }
11253 }
11254 } else {
f4df2210
PM
11255 unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
11256 dc->pc += 4;
11257 disas_arm_insn(dc, insn);
9ee6e8bb 11258 }
e50e6a20
FB
11259
11260 if (dc->condjmp && !dc->is_jmp) {
11261 gen_set_label(dc->condlabel);
11262 dc->condjmp = 0;
11263 }
3849902c
PM
11264
11265 if (tcg_check_temp_count()) {
0a2461fa
AG
11266 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11267 dc->pc);
3849902c
PM
11268 }
11269
aaf2d97d 11270 /* Translation stops when a conditional branch is encountered.
e50e6a20 11271 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11272 * Also stop translation when a page boundary is reached. This
bf20dc07 11273 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11274 num_insns ++;
fe700adb 11275 } while (!dc->is_jmp && !tcg_op_buf_full() &&
ed2803da 11276 !cs->singlestep_enabled &&
1b530a6d 11277 !singlestep &&
50225ad0 11278 !dc->ss_active &&
2e70f6ef
PB
11279 dc->pc < next_page_start &&
11280 num_insns < max_insns);
11281
11282 if (tb->cflags & CF_LAST_IO) {
11283 if (dc->condjmp) {
11284 /* FIXME: This can theoretically happen with self-modifying
11285 code. */
a47dddd7 11286 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11287 }
11288 gen_io_end();
11289 }
9ee6e8bb 11290
b5ff1b31 11291 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11292 instruction was a conditional branch or trap, and the PC has
11293 already been written. */
50225ad0 11294 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
8aaca4c0 11295 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11296 if (dc->condjmp) {
9ee6e8bb
PB
11297 gen_set_condexec(dc);
11298 if (dc->is_jmp == DISAS_SWI) {
50225ad0 11299 gen_ss_advance(dc);
73710361
GB
11300 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11301 default_exception_el(dc));
37e6456e
PM
11302 } else if (dc->is_jmp == DISAS_HVC) {
11303 gen_ss_advance(dc);
73710361 11304 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
11305 } else if (dc->is_jmp == DISAS_SMC) {
11306 gen_ss_advance(dc);
73710361 11307 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
50225ad0
PM
11308 } else if (dc->ss_active) {
11309 gen_step_complete_exception(dc);
9ee6e8bb 11310 } else {
d4a2dc67 11311 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11312 }
e50e6a20
FB
11313 gen_set_label(dc->condlabel);
11314 }
11315 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11316 gen_set_pc_im(dc, dc->pc);
e50e6a20 11317 dc->condjmp = 0;
8aaca4c0 11318 }
9ee6e8bb
PB
11319 gen_set_condexec(dc);
11320 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
50225ad0 11321 gen_ss_advance(dc);
73710361
GB
11322 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11323 default_exception_el(dc));
37e6456e
PM
11324 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11325 gen_ss_advance(dc);
73710361 11326 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
11327 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11328 gen_ss_advance(dc);
73710361 11329 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
50225ad0
PM
11330 } else if (dc->ss_active) {
11331 gen_step_complete_exception(dc);
9ee6e8bb
PB
11332 } else {
11333 /* FIXME: Single stepping a WFI insn will not halt
11334 the CPU. */
d4a2dc67 11335 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11336 }
8aaca4c0 11337 } else {
9ee6e8bb
PB
11338 /* While branches must always occur at the end of an IT block,
11339 there are a few other things that can cause us to terminate
65626741 11340 the TB in the middle of an IT block:
9ee6e8bb
PB
11341 - Exception generating instructions (bkpt, swi, undefined).
11342 - Page boundaries.
11343 - Hardware watchpoints.
11344 Hardware breakpoints have already been handled and skip this code.
11345 */
11346 gen_set_condexec(dc);
8aaca4c0 11347 switch(dc->is_jmp) {
8aaca4c0 11348 case DISAS_NEXT:
6e256c93 11349 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11350 break;
11351 default:
11352 case DISAS_JUMP:
11353 case DISAS_UPDATE:
11354 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11355 tcg_gen_exit_tb(0);
8aaca4c0
FB
11356 break;
11357 case DISAS_TB_JUMP:
11358 /* nothing more to generate */
11359 break;
9ee6e8bb 11360 case DISAS_WFI:
1ce94f81 11361 gen_helper_wfi(cpu_env);
84549b6d
PM
11362 /* The helper doesn't necessarily throw an exception, but we
11363 * must go back to the main loop to check for interrupts anyway.
11364 */
11365 tcg_gen_exit_tb(0);
9ee6e8bb 11366 break;
72c1d3af
PM
11367 case DISAS_WFE:
11368 gen_helper_wfe(cpu_env);
11369 break;
9ee6e8bb 11370 case DISAS_SWI:
73710361
GB
11371 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11372 default_exception_el(dc));
9ee6e8bb 11373 break;
37e6456e 11374 case DISAS_HVC:
73710361 11375 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
11376 break;
11377 case DISAS_SMC:
73710361 11378 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 11379 break;
8aaca4c0 11380 }
e50e6a20
FB
11381 if (dc->condjmp) {
11382 gen_set_label(dc->condlabel);
9ee6e8bb 11383 gen_set_condexec(dc);
6e256c93 11384 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11385 dc->condjmp = 0;
11386 }
2c0262af 11387 }
2e70f6ef 11388
9ee6e8bb 11389done_generating:
806f352d 11390 gen_tb_end(tb, num_insns);
2c0262af
FB
11391
11392#ifdef DEBUG_DISAS
8fec2b8c 11393 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11394 qemu_log("----------------\n");
11395 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11396 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11397 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11398 qemu_log("\n");
2c0262af
FB
11399 }
11400#endif
b5ff1b31 11401 if (search_pc) {
fe700adb 11402 j = tcg_op_buf_count();
b5ff1b31
FB
11403 lj++;
11404 while (lj <= j)
ab1103de 11405 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11406 } else {
2c0262af 11407 tb->size = dc->pc - pc_start;
2e70f6ef 11408 tb->icount = num_insns;
b5ff1b31 11409 }
2c0262af
FB
11410}
11411
0ecb72a5 11412void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11413{
5639c3f2 11414 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11415}
11416
0ecb72a5 11417void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11418{
5639c3f2 11419 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11420}
11421
b5ff1b31 11422static const char *cpu_mode_names[16] = {
28c9457d
EI
11423 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11424 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11425};
9ee6e8bb 11426
878096ee
AF
11427void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11428 int flags)
2c0262af 11429{
878096ee
AF
11430 ARMCPU *cpu = ARM_CPU(cs);
11431 CPUARMState *env = &cpu->env;
2c0262af 11432 int i;
b5ff1b31 11433 uint32_t psr;
2c0262af 11434
17731115
PM
11435 if (is_a64(env)) {
11436 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11437 return;
11438 }
11439
2c0262af 11440 for(i=0;i<16;i++) {
7fe48483 11441 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11442 if ((i % 4) == 3)
7fe48483 11443 cpu_fprintf(f, "\n");
2c0262af 11444 else
7fe48483 11445 cpu_fprintf(f, " ");
2c0262af 11446 }
b5ff1b31 11447 psr = cpsr_read(env);
687fa640
TS
11448 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11449 psr,
b5ff1b31
FB
11450 psr & (1 << 31) ? 'N' : '-',
11451 psr & (1 << 30) ? 'Z' : '-',
11452 psr & (1 << 29) ? 'C' : '-',
11453 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11454 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11455 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11456
f2617cfc
PM
11457 if (flags & CPU_DUMP_FPU) {
11458 int numvfpregs = 0;
11459 if (arm_feature(env, ARM_FEATURE_VFP)) {
11460 numvfpregs += 16;
11461 }
11462 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11463 numvfpregs += 16;
11464 }
11465 for (i = 0; i < numvfpregs; i++) {
11466 uint64_t v = float64_val(env->vfp.regs[i]);
11467 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11468 i * 2, (uint32_t)v,
11469 i * 2 + 1, (uint32_t)(v >> 32),
11470 i, v);
11471 }
11472 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11473 }
2c0262af 11474}
a6b025d3 11475
0ecb72a5 11476void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11477{
3926cc84
AG
11478 if (is_a64(env)) {
11479 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11480 env->condexec_bits = 0;
3926cc84
AG
11481 } else {
11482 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11483 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11484 }
d2856f1a 11485}
This page took 3.101527 seconds and 4 git commands to generate.