 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
static TCGv_i64 cpu_reg(DisasContext *s, int reg);

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);

/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     * if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}

void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            uint64_t vlo = q[0];
            uint64_t vhi = q[1];
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
                        i, vhi, vlo, (i & 1 ? '\n' : ' '));
        }
        cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative-branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{

    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set. If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }

            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}

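/* A worked example of the sign-extension path above (illustrative values,
 * not taken from the original source): with TBI0 == TBI1 == 1 and
 *    src        = 0x0080000000001234   (bit 55 set)
 * the shift pair produces
 *    src << 8   = 0x8000000000123400
 *    asr >> 8   = 0xFF80000000001234
 * i.e. the tag byte [63:56] is replaced by copies of bit 55.
 */
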
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly. The NE/EQ comparisons are also fine with this choice. */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We have just completed a step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* We should have at some point before trying to access an FP register
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}

/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the low half and vfp.zregs[n].d[1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}

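/* Worked example (illustrative, not from the original source): on a
 * big-endian host, MO_32 element 0 gives offs = 16 - (1 * 4) = 12, then
 * 12 ^ 8 = 4: the architecturally least significant word lives in host
 * bytes 4..7 of zregs[n].d[0], which is exactly where a big-endian
 * uint64_t keeps its low 32 bits.
 */
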
/* Return the offset into CPUARMState of the "whole" vector register Qn. */
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}

/* Return a newly allocated pointer to the vector register. */
static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
    return ret;
}

/* Return the byte size of the "whole" vector register, VL / 8. */
static inline int vec_full_reg_size(DisasContext *s)
{
    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
       In the meantime this is just the AdvSIMD length of 128. */
    return 16;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}

static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

static TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

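/* Reminder of the flag representation assumed here: cpu_NF holds the N
 * flag in its sign bit, and cpu_ZF represents Z inverted (Z is set iff
 * cpu_ZF == 0). ORing the two 32 bit halves of the 64 bit result
 * therefore yields a value that is zero exactly when the result is zero.
 */
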
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}

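/* The V flag computation above uses the standard identity (worked example
 * with illustrative 32 bit values): overflow happens when the operands
 * agree in sign but the result does not, i.e. (result ^ t0) & ~(t0 ^ t1)
 * has its top bit set. For 0x7fffffff + 1 = 0x80000000:
 *    result ^ t0  = 0xffffffff
 *    ~(t0 ^ t1)   = 0x80000001
 * whose AND has bit 31 set, so V is set as expected.
 */
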
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

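/* Note that for subtraction the architecture defines C as NOT(borrow),
 * so C is 1 exactly when t0 >= t1 as unsigned integers; that is the
 * TCG_COND_GEU setcond used above. The V flag uses (result ^ t0) &
 * (t0 ^ t1), since overflow on subtract requires the operand signs
 * to differ.
 */
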
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}

/* Check that SVE access is enabled. If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
static inline bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

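/* Worked example (illustrative): option == 0b010 selects an unsigned word
 * (UXTW: extsize 2, is_signed 0), so for e.g. an extended-register ADD
 * with UXTW #2 this emits ext32u followed by a left shift by 2. Note that
 * option == 0b011 (UXTX/LSL) and 0b111 (SXTX) both degenerate to a plain
 * move, since the source is already 64 bits wide.
 */
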
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

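/* A sketch of the intended usage (the table contents below are purely
 * illustrative, not part of this file):
 *
 *     static const AArch64DecodeTable data_proc_simd[] = {
 *         { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
 *         ...
 *         { 0x00000000, 0x00000000, NULL }     <- terminator
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 */
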
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* B Branch / BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

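/* Note on the "- 4" above: by the time a disas_* function runs, s->pc has
 * already been advanced past the current instruction, so the architectural
 * PC of the branch itself is s->pc - 4; PC-relative targets are therefore
 * formed as s->pc - 4 + offset.
 */
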
/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

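/* Worked encoding example (illustrative): TBZ x20, #37, <label> sets
 * b5 = 1 and b40 = 0b00101, so bit_pos = (1 << 5) | 5 = 37, and the
 * generated andi tests exactly bit 37 of X20.
 */
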
/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs. */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}

/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}

/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}

/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------------+------------------------+----------+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1: /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2: /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3: /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
                           default_exception_el(s));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * the architecture requires when halting debug is disabled: it
         * will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        gen_helper_exception_return(cpu_env);
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}

/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword. */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned. */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

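/* Because success is decided by comparing the memory contents against the
 * remembered cpu_exclusive_val, a store by another CPU that writes back
 * the same value is not detected (the classic cmpxchg ABA limitation).
 * This is the "not actually the architecturally mandated semantics"
 * caveat mentioned above, accepted as good enough for typical guest code.
 */
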
/* Compute the Sixty-Four bit (SF) register size indication. This logic
 * is derived from the ARMv8 specs for LDR (Shared decode for all encodings).
 */
static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
{
    int opc0 = extract32(opc, 0, 1);
    int regsize;

    if (is_signed) {
        regsize = opc0 ? 32 : 64;
    } else {
        regsize = size == 3 ? 64 : 32;
    }
    return regsize == 64;
}

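/* Worked example (illustrative): LDRSW (32 bit load, sign-extended into a
 * 64 bit register) has is_signed true and opc = 2, so opc0 = 0 and
 * regsize = 64: the reported ISS.SF is 1.
 */
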
/* Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  | Rt   |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 *  sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *   L: 0 -> store, 1 -> load
 *  o2: 0 -> exclusive, 1 -> not
 *  o1: 0 -> single register, 1 -> register pair
 *  o0: 1 -> load-acquire/store-release, 0 -> not
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int is_lasr = extract32(insn, 15, 1);
    int rs = extract32(insn, 16, 5);
    int is_pair = extract32(insn, 21, 1);
    int is_store = !extract32(insn, 22, 1);
    int is_excl = !extract32(insn, 23, 1);
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    if ((!is_excl && !is_pair && !is_lasr) ||
        (!is_excl && is_pair) ||
        (is_pair && size < 2)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Note that since TCG is single threaded load-acquire/store-release
     * semantics require no extra if (is_lasr) { ... } handling.
     */

    if (is_excl) {
        if (!is_store) {
            s->is_ldex = true;
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
        } else {
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);

        /* Generate ISS for non-exclusive accesses including LASR. */
        if (is_store) {
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
            }
            do_gpr_st(s, tcg_rt, tcg_addr, size,
                      true, rt, iss_sf, is_lasr);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
                      true, rt, iss_sf, is_lasr);
            if (is_lasr) {
                tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
            }
        }
    }
}

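/* Note the barrier placement for the ordered variants above: a
 * load-acquire emits its barrier after the load (later accesses may not
 * move above it), while a store-release emits its barrier before the
 * store (earlier accesses may not move below it).
 */
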
/*
 * Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
        if (!fp_access_check(s)) {
            return;
        }
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        /* Only unsigned 32bit loads target 32bit registers. */
        bool iss_sf = opc != 0;

        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
                  true, rt, iss_sf, false);
    }
    tcg_temp_free_i64(tcg_addr);
}

2221 * LDNP (Load Pair - non-temporal hint)
2222 * LDP (Load Pair - non vector)
2223 * LDPSW (Load Pair Signed Word - non vector)
2224 * STNP (Store Pair - non-temporal hint)
2225 * STP (Store Pair - non vector)
2226 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2227 * LDP (Load Pair of SIMD&FP)
2228 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2229 * STP (Store Pair of SIMD&FP)
2231 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2232 * +-----+-------+---+---+-------+---+-----------------------------+
2233 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2234 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2236 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2237 * LDPSW 01
2238 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2239 * V: 0 -> GPR, 1 -> Vector
2240 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2241 * 10 -> signed offset, 11 -> pre-index
2242 * L: 0 -> Store 1 -> Load
2244 * Rt, Rt2 = GPR or SIMD registers to be stored
2245 * Rn = general purpose register containing address
2246 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2248 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2250 int rt = extract32(insn, 0, 5);
2251 int rn = extract32(insn, 5, 5);
2252 int rt2 = extract32(insn, 10, 5);
2253 uint64_t offset = sextract64(insn, 15, 7);
2254 int index = extract32(insn, 23, 2);
2255 bool is_vector = extract32(insn, 26, 1);
2256 bool is_load = extract32(insn, 22, 1);
2257 int opc = extract32(insn, 30, 2);
2259 bool is_signed = false;
2260 bool postindex = false;
2263 TCGv_i64 tcg_addr; /* calculated address */
2267 unallocated_encoding(s);
2274 size = 2 + extract32(opc, 1, 1);
2275 is_signed = extract32(opc, 0, 1);
2276 if (!is_load && is_signed) {
2277 unallocated_encoding(s);
2283 case 1: /* post-index */
2288 /* signed offset with "non-temporal" hint. Since we don't emulate
2289 * caches we don't care about hints to the cache system about
2290 * data access patterns, and handle this identically to plain
2291 * signed offset.
2292 */
2293 if (is_signed) {
2294 /* There is no non-temporal-hint version of LDPSW */
2295 unallocated_encoding(s);
2300 case 2: /* signed offset, rn not updated */
2303 case 3: /* pre-index */
2309 if (is_vector && !fp_access_check(s)) {
2316 gen_check_sp_alignment(s);
2319 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2322 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2327 do_fp_ld(s, rt, tcg_addr, size);
2329 do_fp_st(s, rt, tcg_addr, size);
2331 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2333 do_fp_ld(s, rt2, tcg_addr, size);
2335 do_fp_st(s, rt2, tcg_addr, size);
2338 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2339 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2342 TCGv_i64 tmp = tcg_temp_new_i64();
2344 /* Do not modify tcg_rt before recognizing any exception
2345 * from the second load.
2347 do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
2348 false, 0, false, false);
2349 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2350 do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2351 false, 0, false, false);
2353 tcg_gen_mov_i64(tcg_rt, tmp);
2354 tcg_temp_free_i64(tmp);
2356 do_gpr_st(s, tcg_rt, tcg_addr, size,
2357 false, 0, false, false);
2358 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2359 do_gpr_st(s, tcg_rt2, tcg_addr, size,
2360 false, 0, false, false);
2366 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2368 tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2370 tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
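/* Worked example (illustrative): "ldp x0, x1, [sp, #16]" encodes
 * imm7 = 16 >> size = 16 >> 3 = 2; the architected byte offset is imm7
 * scaled back up by the access size, so Rt is loaded from [sp + 16] and
 * Rt2 from [sp + 16 + (1 << size)] = [sp + 24], matching the
 * 1 << size step between the two accesses above.
 */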
2375 * Load/store (immediate post-indexed)
2376 * Load/store (immediate pre-indexed)
2377 * Load/store (unscaled immediate)
2379 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2380 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2381 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2382 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2384 * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback)
2386 * V = 0 -> non-vector
2387 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
2388 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2390 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2391 int opc,
2392 int size,
2393 int rt,
2394 bool is_vector)
2395 {
2396 int rn = extract32(insn, 5, 5);
2397 int imm9 = sextract32(insn, 12, 9);
2398 int idx = extract32(insn, 10, 2);
2399 bool is_signed = false;
2400 bool is_store = false;
2401 bool is_extended = false;
2402 bool is_unpriv = (idx == 2);
2403 bool iss_valid = !is_vector;
2410 size |= (opc & 2) << 1;
2411 if (size > 4 || is_unpriv) {
2412 unallocated_encoding(s);
2415 is_store = ((opc & 1) == 0);
2416 if (!fp_access_check(s)) {
2420 if (size == 3 && opc == 2) {
2421 /* PRFM - prefetch */
2423 unallocated_encoding(s);
2428 if (opc == 3 && size > 1) {
2429 unallocated_encoding(s);
2432 is_store = (opc == 0);
2433 is_signed = extract32(opc, 1, 1);
2434 is_extended = (size < 3) && extract32(opc, 0, 1);
2452 g_assert_not_reached();
2456 gen_check_sp_alignment(s);
2458 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2461 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2466 do_fp_st(s, rt, tcg_addr, size);
2468 do_fp_ld(s, rt, tcg_addr, size);
2471 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2472 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2473 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2476 do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2477 iss_valid, rt, iss_sf, false);
2479 do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2480 is_signed, is_extended, memidx,
2481 iss_valid, rt, iss_sf, false);
2486 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2488 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2490 tcg_gen_mov_i64(tcg_rn, tcg_addr);
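/* Sketch of the imm9 addressing forms (illustrative pseudo-C over a
 * hypothetical regs[] array, not part of this file):
 *
 *   unscaled   (idx=0): ea = regs[rn] + imm9;             no writeback
 *   post-index (idx=1): ea = regs[rn]; regs[rn] = ea + imm9;
 *   unpriv     (idx=2): as idx=0 but using the user mem index
 *   pre-index  (idx=3): ea = regs[rn] + imm9; regs[rn] = ea;
 *
 * matching the code above: the first addi is skipped for post-index
 * and re-done inside the writeback block instead.
 */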
2495 * Load/store (register offset)
2497 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2498 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2499 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
2500 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2502 * For non-vector:
2503 * size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2504 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2505 * For vector:
2506 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2507 * opc<0>: 0 -> store, 1 -> load
2508 * V: 1 -> vector/simd
2509 * opt: extend encoding (see DecodeRegExtend)
2510 * S: if S=1 then scale (essentially index by sizeof(size))
2511 * Rt: register to transfer into/out of
2512 * Rn: address register or SP for base
2513 * Rm: offset register or ZR for offset
2515 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2516 int opc,
2517 int size,
2518 int rt,
2519 bool is_vector)
2520 {
2521 int rn = extract32(insn, 5, 5);
2522 int shift = extract32(insn, 12, 1);
2523 int rm = extract32(insn, 16, 5);
2524 int opt = extract32(insn, 13, 3);
2525 bool is_signed = false;
2526 bool is_store = false;
2527 bool is_extended = false;
2532 if (extract32(opt, 1, 1) == 0) {
2533 unallocated_encoding(s);
2538 size |= (opc & 2) << 1;
2540 unallocated_encoding(s);
2543 is_store = !extract32(opc, 0, 1);
2544 if (!fp_access_check(s)) {
2548 if (size == 3 && opc == 2) {
2549 /* PRFM - prefetch */
2552 if (opc == 3 && size > 1) {
2553 unallocated_encoding(s);
2556 is_store = (opc == 0);
2557 is_signed = extract32(opc, 1, 1);
2558 is_extended = (size < 3) && extract32(opc, 0, 1);
2562 gen_check_sp_alignment(s);
2564 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2566 tcg_rm = read_cpu_reg(s, rm, 1);
2567 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2569 tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2573 do_fp_st(s, rt, tcg_addr, size);
2575 do_fp_ld(s, rt, tcg_addr, size);
2578 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2579 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2581 do_gpr_st(s, tcg_rt, tcg_addr, size,
2582 true, rt, iss_sf, false);
2584 do_gpr_ld(s, tcg_rt, tcg_addr, size,
2585 is_signed, is_extended,
2586 true, rt, iss_sf, false);
2592 * Load/store (unsigned immediate)
2594 * 31 30 29 27 26 25 24 23 22 21 10 9 5
2595 * +----+-------+---+-----+-----+------------+-------+------+
2596 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
2597 * +----+-------+---+-----+-----+------------+-------+------+
2599 * For non-vector:
2600 * size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2601 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
2602 * For vector:
2603 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2604 * opc<0>: 0 -> store, 1 -> load
2605 * Rn: base address register (inc SP)
2606 * Rt: target register
2608 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2609 int opc,
2610 int size,
2611 int rt,
2612 bool is_vector)
2613 {
2614 int rn = extract32(insn, 5, 5);
2615 unsigned int imm12 = extract32(insn, 10, 12);
2616 unsigned int offset;
2621 bool is_signed = false;
2622 bool is_extended = false;
2625 size |= (opc & 2) << 1;
2627 unallocated_encoding(s);
2630 is_store = !extract32(opc, 0, 1);
2631 if (!fp_access_check(s)) {
2635 if (size == 3 && opc == 2) {
2636 /* PRFM - prefetch */
2639 if (opc == 3 && size > 1) {
2640 unallocated_encoding(s);
2643 is_store = (opc == 0);
2644 is_signed = extract32(opc, 1, 1);
2645 is_extended = (size < 3) && extract32(opc, 0, 1);
2649 gen_check_sp_alignment(s);
2651 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2652 offset = imm12 << size;
2653 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2657 do_fp_st(s, rt, tcg_addr, size);
2659 do_fp_ld(s, rt, tcg_addr, size);
2662 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2663 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2665 do_gpr_st(s, tcg_rt, tcg_addr, size,
2666 true, rt, iss_sf, false);
2668 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2669 true, rt, iss_sf, false);
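/* Worked example (illustrative): "ldr x0, [x1, #0x1f8]" encodes size=3
 * and imm12 = 0x1f8 >> 3 = 0x3f; the decoder rebuilds the byte offset
 * as imm12 << size = 0x1f8. Offsets that are not a multiple of the
 * access size cannot use this form and fall back to the unscaled imm9
 * encoding.
 */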
2674 /* Load/store register (all forms) */
2675 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2677 int rt = extract32(insn, 0, 5);
2678 int opc = extract32(insn, 22, 2);
2679 bool is_vector = extract32(insn, 26, 1);
2680 int size = extract32(insn, 30, 2);
2682 switch (extract32(insn, 24, 2)) {
2684 if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
2685 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2687 /* Load/store register (unscaled immediate)
2688 * Load/store immediate pre/post-indexed
2689 * Load/store register unprivileged
2691 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2695 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2698 unallocated_encoding(s);
2703 /* AdvSIMD load/store multiple structures
2705 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
2706 * +---+---+---------------+---+-------------+--------+------+------+------+
2707 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
2708 * +---+---+---------------+---+-------------+--------+------+------+------+
2710 * AdvSIMD load/store multiple structures (post-indexed)
2712 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
2713 * +---+---+---------------+---+---+---------+--------+------+------+------+
2714 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
2715 * +---+---+---------------+---+---+---------+--------+------+------+------+
2717 * Rt: first (or only) SIMD&FP register to be transferred
2718 * Rn: base address or SP
2719 * Rm (post-index only): post-index register (when Rm != 31) or size dependent #imm
2721 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
2723 int rt = extract32(insn, 0, 5);
2724 int rn = extract32(insn, 5, 5);
2725 int size = extract32(insn, 10, 2);
2726 int opcode = extract32(insn, 12, 4);
2727 bool is_store = !extract32(insn, 22, 1);
2728 bool is_postidx = extract32(insn, 23, 1);
2729 bool is_q = extract32(insn, 30, 1);
2730 TCGv_i64 tcg_addr, tcg_rn;
2732 int ebytes = 1 << size;
2733 int elements = (is_q ? 128 : 64) / (8 << size);
2734 int rpt; /* num iterations */
2735 int selem; /* structure elements */
2738 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
2739 unallocated_encoding(s);
2743 /* From the shared decode logic */
2774 unallocated_encoding(s);
2778 if (size == 3 && !is_q && selem != 1) {
2780 unallocated_encoding(s);
2784 if (!fp_access_check(s)) {
2789 gen_check_sp_alignment(s);
2792 tcg_rn = cpu_reg_sp(s, rn);
2793 tcg_addr = tcg_temp_new_i64();
2794 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2796 for (r = 0; r < rpt; r++) {
2798 for (e = 0; e < elements; e++) {
2799 int tt = (rt + r) % 32;
2801 for (xs = 0; xs < selem; xs++) {
2803 do_vec_st(s, tt, e, tcg_addr, size);
2805 do_vec_ld(s, tt, e, tcg_addr, size);
2807 /* For non-quad operations, setting a slice of the low
2808 * 64 bits of the register clears the high 64 bits (in
2809 * the ARM ARM pseudocode this is implicit in the fact
2810 * that 'rval' is a 64 bit wide variable).
2811 * For quad operations, we might still need to zero the
2812 * high bits of SVE. We optimize by noticing that we only
2813 * need to do this the first time we touch a register.
2815 if (e == 0 && (r == 0 || xs == selem - 1)) {
2816 clear_vec_high(s, is_q, tt);
2819 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2826 int rm = extract32(insn, 16, 5);
2828 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2830 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2833 tcg_temp_free_i64(tcg_addr);
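/* Worked example (illustrative): "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]"
 * decodes with is_q=1, size=2 and, per the shared decode, rpt=1 and
 * selem=4, so elements = 128 / (8 << 2) = 4 and the loop above performs
 * 1 * 4 * 4 = 16 four-byte accesses, de-interleaving consecutive memory
 * elements across v0..v3.
 */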
2836 /* AdvSIMD load/store single structure
2838 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2839 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2840 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
2841 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2843 * AdvSIMD load/store single structure (post-indexed)
2845 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2846 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2847 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
2848 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
2850 * Rt: first (or only) SIMD&FP register to be transferred
2851 * Rn: base address or SP
2852 * Rm (post-index only): post-index register (when Rm != 31) or size dependent #imm
2853 * index = encoded in Q:S:size dependent on size
2855 * lane_size = encoded in R, opc
2856 * transfer width = encoded in opc, S, size
2858 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
2860 int rt = extract32(insn, 0, 5);
2861 int rn = extract32(insn, 5, 5);
2862 int size = extract32(insn, 10, 2);
2863 int S = extract32(insn, 12, 1);
2864 int opc = extract32(insn, 13, 3);
2865 int R = extract32(insn, 21, 1);
2866 int is_load = extract32(insn, 22, 1);
2867 int is_postidx = extract32(insn, 23, 1);
2868 int is_q = extract32(insn, 30, 1);
2870 int scale = extract32(opc, 1, 2);
2871 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
2872 bool replicate = false;
2873 int index = is_q << 3 | S << 2 | size;
2875 TCGv_i64 tcg_addr, tcg_rn;
2879 if (!is_load || S) {
2880 unallocated_encoding(s);
2889 if (extract32(size, 0, 1)) {
2890 unallocated_encoding(s);
2896 if (extract32(size, 1, 1)) {
2897 unallocated_encoding(s);
2900 if (!extract32(size, 0, 1)) {
2904 unallocated_encoding(s);
2912 g_assert_not_reached();
2915 if (!fp_access_check(s)) {
2919 ebytes = 1 << scale;
2922 gen_check_sp_alignment(s);
2925 tcg_rn = cpu_reg_sp(s, rn);
2926 tcg_addr = tcg_temp_new_i64();
2927 tcg_gen_mov_i64(tcg_addr, tcg_rn);
2929 for (xs = 0; xs < selem; xs++) {
2931 /* Load and replicate to all elements */
2933 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
2935 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
2936 get_mem_index(s), s->be_data + scale);
2939 mulconst = 0x0101010101010101ULL;
2942 mulconst = 0x0001000100010001ULL;
2945 mulconst = 0x0000000100000001ULL;
2951 g_assert_not_reached();
2954 tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
2956 write_vec_element(s, tcg_tmp, rt, 0, MO_64);
2958 write_vec_element(s, tcg_tmp, rt, 1, MO_64);
2960 tcg_temp_free_i64(tcg_tmp);
2961 clear_vec_high(s, is_q, rt);
2963 /* Load/store one element per register */
2965 do_vec_ld(s, rt, index, tcg_addr, scale);
2967 do_vec_st(s, rt, index, tcg_addr, scale);
2970 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
2975 int rm = extract32(insn, 16, 5);
2977 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2979 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
2982 tcg_temp_free_i64(tcg_addr);
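/* Worked example (illustrative): the multiply above replicates one
 * loaded element across a 64-bit lane. For a replicating byte load of
 * 0xAB: 0xAB * 0x0101010101010101 = 0xABABABABABABABAB, i.e. eight
 * copies at once; the 16-bit and 32-bit constants replicate the same
 * way. This implements the LD1R..LD4R forms without a per-element loop.
 */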
2985 /* Loads and stores */
2986 static void disas_ldst(DisasContext *s, uint32_t insn)
2988 switch (extract32(insn, 24, 6)) {
2989 case 0x08: /* Load/store exclusive */
2990 disas_ldst_excl(s, insn);
2992 case 0x18: case 0x1c: /* Load register (literal) */
2993 disas_ld_lit(s, insn);
2995 case 0x28: case 0x29:
2996 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
2997 disas_ldst_pair(s, insn);
2999 case 0x38: case 0x39:
3000 case 0x3c: case 0x3d: /* Load/store register (all forms) */
3001 disas_ldst_reg(s, insn);
3003 case 0x0c: /* AdvSIMD load/store multiple structures */
3004 disas_ldst_multiple_struct(s, insn);
3006 case 0x0d: /* AdvSIMD load/store single structure */
3007 disas_ldst_single_struct(s, insn);
3010 unallocated_encoding(s);
3015 /* PC-rel. addressing
3016 * 31 30 29 28 24 23 5 4 0
3017 * +----+-------+-----------+-------------------+------+
3018 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
3019 * +----+-------+-----------+-------------------+------+
3021 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3023 unsigned int page, rd;
3027 page = extract32(insn, 31, 1);
3028 /* SignExtend(immhi:immlo) -> offset */
3029 offset = sextract64(insn, 5, 19);
3030 offset = offset << 2 | extract32(insn, 29, 2);
3031 rd = extract32(insn, 0, 5);
3035 /* ADRP (page based) */
3040 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
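/* Worked example (illustrative): ADR adds the byte offset to the
 * address of this instruction, while ADRP shifts the offset left by 12
 * and clears the low 12 bits of the base first. An ADRP at 0x400123
 * with immhi:immlo = 1 therefore produces 0x400000 + 0x1000 = 0x401000.
 */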
3044 * Add/subtract (immediate)
3046 * 31 30 29 28 24 23 22 21 10 9 5 4 0
3047 * +--+--+--+-----------+-----+-------------+-----+-----+
3048 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
3049 * +--+--+--+-----------+-----+-------------+-----+-----+
3051 * sf: 0 -> 32bit, 1 -> 64bit
3052 * op: 0 -> add , 1 -> sub
3054 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
3056 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
3058 int rd = extract32(insn, 0, 5);
3059 int rn = extract32(insn, 5, 5);
3060 uint64_t imm = extract32(insn, 10, 12);
3061 int shift = extract32(insn, 22, 2);
3062 bool setflags = extract32(insn, 29, 1);
3063 bool sub_op = extract32(insn, 30, 1);
3064 bool is_64bit = extract32(insn, 31, 1);
3066 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3067 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
3068 TCGv_i64 tcg_result;
3077 unallocated_encoding(s);
3081 tcg_result = tcg_temp_new_i64();
3084 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
3086 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
3089 TCGv_i64 tcg_imm = tcg_const_i64(imm);
3091 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3093 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3095 tcg_temp_free_i64(tcg_imm);
3099 tcg_gen_mov_i64(tcg_rd, tcg_result);
3101 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3104 tcg_temp_free_i64(tcg_result);
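/* Worked example (illustrative): "add x0, x1, #0x5000" encodes
 * imm12 = 5 with shift = 01, meaning the immediate is shifted left by
 * 12 before use (shift values other than 00 and 01 are unallocated).
 * imm is held in a uint64_t so the shifted value is not truncated for
 * 64-bit forms.
 */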
3107 /* The input should be a value in the bottom e bits (with higher
3108 * bits zero); returns that value replicated into every element
3109 * of size e in a 64 bit integer.
3111 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
3121 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
3122 static inline uint64_t bitmask64(unsigned int length)
3124 assert(length > 0 && length <= 64);
3125 return ~0ULL >> (64 - length);
3128 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3129 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3130 * value (ie should cause a guest UNDEF exception), and true if they are
3131 * valid, in which case the decoded bit pattern is written to result.
3133 static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3134 unsigned int imms, unsigned int immr)
3137 unsigned e, levels, s, r;
3140 assert(immn < 2 && imms < 64 && immr < 64);
3142 /* The bit patterns we create here are 64 bit patterns which
3143 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3144 * 64 bits each. Each element contains the same value: a run
3145 * of between 1 and e-1 non-zero bits, rotated within the
3146 * element by between 0 and e-1 bits.
3148 * The element size and run length are encoded into immn (1 bit)
3149 * and imms (6 bits) as follows:
3150 * 64 bit elements: immn = 1, imms = <length of run - 1>
3151 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3152 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3153 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3154 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3155 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3156 * Notice that immn = 0, imms = 11111x is the only combination
3157 * not covered by one of the above options; this is reserved.
3158 * Further, <length of run - 1> all-ones is a reserved pattern.
3160 * In all cases the rotation is by immr % e (and immr is 6 bits).
3163 /* First determine the element size */
3164 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3166 /* This is the immn == 0, imms == 11111x case */
3176 /* <length of run - 1> mustn't be all-ones. */
3180 /* Create the value of one element: s+1 set bits rotated
3181 * by r within the element (which is e bits wide)...
3183 mask = bitmask64(s + 1);
3185 mask = (mask >> r) | (mask << (e - r));
3186 mask &= bitmask64(e);
3188 /* ...then replicate the element over the whole 64 bit value */
3189 mask = bitfield_replicate(mask, e);
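/* Worked example (illustrative): N=0, imms=0b111100, immr=0 gives
 * len = 31 - clz32(~0x3c & 0x3f) = 31 - clz32(3) = 1, i.e. 2-bit
 * elements with levels = 1, so s = r = 0: each element is a single set
 * bit, and replication produces 0x5555555555555555 - the immediate
 * used by "and x0, x1, #0x5555555555555555".
 */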
3194 /* Logical (immediate)
3195 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3196 * +----+-----+-------------+---+------+------+------+------+
3197 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
3198 * +----+-----+-------------+---+------+------+------+------+
3200 static void disas_logic_imm(DisasContext *s, uint32_t insn)
3202 unsigned int sf, opc, is_n, immr, imms, rn, rd;
3203 TCGv_i64 tcg_rd, tcg_rn;
3205 bool is_and = false;
3207 sf = extract32(insn, 31, 1);
3208 opc = extract32(insn, 29, 2);
3209 is_n = extract32(insn, 22, 1);
3210 immr = extract32(insn, 16, 6);
3211 imms = extract32(insn, 10, 6);
3212 rn = extract32(insn, 5, 5);
3213 rd = extract32(insn, 0, 5);
3216 unallocated_encoding(s);
3220 if (opc == 0x3) { /* ANDS */
3221 tcg_rd = cpu_reg(s, rd);
3223 tcg_rd = cpu_reg_sp(s, rd);
3225 tcg_rn = cpu_reg(s, rn);
3227 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3228 /* some immediate field values are reserved */
3229 unallocated_encoding(s);
3234 wmask &= 0xffffffff;
3238 case 0x3: /* ANDS */
3240 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3244 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3247 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3250 assert(FALSE); /* must handle all above */
3254 if (!sf && !is_and) {
3255 /* zero extend final result; we know we can skip this for AND
3256 * since the immediate had the high 32 bits clear.
3258 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3261 if (opc == 3) { /* ANDS */
3262 gen_logic_CC(sf, tcg_rd);
3267 * Move wide (immediate)
3269 * 31 30 29 28 23 22 21 20 5 4 0
3270 * +--+-----+-------------+-----+----------------+------+
3271 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
3272 * +--+-----+-------------+-----+----------------+------+
3274 * sf: 0 -> 32 bit, 1 -> 64 bit
3275 * opc: 00 -> N, 10 -> Z, 11 -> K
3276 * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf is 1)
3278 static void disas_movw_imm(DisasContext *s, uint32_t insn)
3280 int rd = extract32(insn, 0, 5);
3281 uint64_t imm = extract32(insn, 5, 16);
3282 int sf = extract32(insn, 31, 1);
3283 int opc = extract32(insn, 29, 2);
3284 int pos = extract32(insn, 21, 2) << 4;
3285 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3288 if (!sf && (pos >= 32)) {
3289 unallocated_encoding(s);
3303 tcg_gen_movi_i64(tcg_rd, imm);
3306 tcg_imm = tcg_const_i64(imm);
3307 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3308 tcg_temp_free_i64(tcg_imm);
3310 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3314 unallocated_encoding(s);
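/* Usage sketch (illustrative, not from the source): an arbitrary 64-bit
 * constant is materialised with one MOVZ and up to three MOVKs, each
 * hitting this decoder:
 *
 *   movz x0, #0x1234, lsl #48   ; opc=Z: x0 = 0x1234000000000000
 *   movk x0, #0x5678, lsl #32   ; opc=K: deposit 16 bits at pos 32
 *   movk x0, #0x9abc, lsl #16
 *   movk x0, #0xdef0            ; x0 = 0x123456789abcdef0
 */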
3319 /* Bitfield
3320 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3321 * +----+-----+-------------+---+------+------+------+------+
3322 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
3323 * +----+-----+-------------+---+------+------+------+------+
3325 static void disas_bitfield(DisasContext *s, uint32_t insn)
3327 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3328 TCGv_i64 tcg_rd, tcg_tmp;
3330 sf = extract32(insn, 31, 1);
3331 opc = extract32(insn, 29, 2);
3332 n = extract32(insn, 22, 1);
3333 ri = extract32(insn, 16, 6);
3334 si = extract32(insn, 10, 6);
3335 rn = extract32(insn, 5, 5);
3336 rd = extract32(insn, 0, 5);
3337 bitsize = sf ? 64 : 32;
3339 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3340 unallocated_encoding(s);
3344 tcg_rd = cpu_reg(s, rd);
3346 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
3347 to be smaller than bitsize, we'll never reference data outside the
3348 low 32-bits anyway. */
3349 tcg_tmp = read_cpu_reg(s, rn, 1);
3351 /* Recognize simple(r) extractions. */
3353 /* Wd<s-r:0> = Wn<s:r> */
3354 len = (si - ri) + 1;
3355 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
3356 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
3358 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
3359 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
3362 /* opc == 1, BFXIL: fall through to deposit */
3363 tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
3366 /* Handle the ri > si case with a deposit
3367 * Wd<32+s-r,32-r> = Wn<s:0>
3370 pos = (bitsize - ri) & (bitsize - 1);
3373 if (opc == 0 && len < ri) {
3374 /* SBFM: sign extend the destination field from len to fill
3375 the balance of the word. Let the deposit below insert all
3376 of those sign bits. */
3377 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
3381 if (opc == 1) { /* BFM, BFXIL */
3382 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
3384 /* SBFM or UBFM: We start with zero, and we haven't modified
3385 any bits outside bitsize, therefore the zero-extension
3386 below is unneeded. */
3387 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
3392 if (!sf) { /* zero extend final result */
3393 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
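/* Worked examples (illustrative): "ubfx w0, w1, #4, #8" is UBFM with
 * ri=4, si=11; since si >= ri the extract path takes len = 11-4+1 = 8
 * and copies Wn<11:4> to Wd<7:0>. "lsl w0, w1, #4" is UBFM with ri=28,
 * si=27; ri > si selects the deposit path with pos = (32-28) & 31 = 4,
 * depositing the low si+1 = 28 bits of Wn at bit 4.
 */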
3397 /* Extract
3398 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
3399 * +----+------+-------------+---+----+------+--------+------+------+
3400 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
3401 * +----+------+-------------+---+----+------+--------+------+------+
3403 static void disas_extract(DisasContext *s, uint32_t insn)
3405 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
3407 sf = extract32(insn, 31, 1);
3408 n = extract32(insn, 22, 1);
3409 rm = extract32(insn, 16, 5);
3410 imm = extract32(insn, 10, 6);
3411 rn = extract32(insn, 5, 5);
3412 rd = extract32(insn, 0, 5);
3413 op21 = extract32(insn, 29, 2);
3414 op0 = extract32(insn, 21, 1);
3415 bitsize = sf ? 64 : 32;
3417 if (sf != n || op21 || op0 || imm >= bitsize) {
3418 unallocated_encoding(s);
3420 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
3422 tcg_rd = cpu_reg(s, rd);
3424 if (unlikely(imm == 0)) {
3425 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
3426 * so an extract from bit 0 is a special case.
3429 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
3431 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
3433 } else if (rm == rn) { /* ROR */
3434 tcg_rm = cpu_reg(s, rm);
3436 tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
3438 TCGv_i32 tmp = tcg_temp_new_i32();
3439 tcg_gen_extrl_i64_i32(tmp, tcg_rm);
3440 tcg_gen_rotri_i32(tmp, tmp, imm);
3441 tcg_gen_extu_i32_i64(tcg_rd, tmp);
3442 tcg_temp_free_i32(tmp);
3445 tcg_rm = read_cpu_reg(s, rm, sf);
3446 tcg_rn = read_cpu_reg(s, rn, sf);
3447 tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
3448 tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
3449 tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
3451 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3457 /* Data processing - immediate */
3458 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3460 switch (extract32(insn, 23, 6)) {
3461 case 0x20: case 0x21: /* PC-rel. addressing */
3462 disas_pc_rel_adr(s, insn);
3464 case 0x22: case 0x23: /* Add/subtract (immediate) */
3465 disas_add_sub_imm(s, insn);
3467 case 0x24: /* Logical (immediate) */
3468 disas_logic_imm(s, insn);
3470 case 0x25: /* Move wide (immediate) */
3471 disas_movw_imm(s, insn);
3473 case 0x26: /* Bitfield */
3474 disas_bitfield(s, insn);
3476 case 0x27: /* Extract */
3477 disas_extract(s, insn);
3480 unallocated_encoding(s);
3485 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
3486 * Note that it is the caller's responsibility to ensure that the
3487 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
3488 * mandated semantics for out of range shifts.
3490 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3491 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3493 switch (shift_type) {
3494 case A64_SHIFT_TYPE_LSL:
3495 tcg_gen_shl_i64(dst, src, shift_amount);
3497 case A64_SHIFT_TYPE_LSR:
3498 tcg_gen_shr_i64(dst, src, shift_amount);
3500 case A64_SHIFT_TYPE_ASR:
3502 tcg_gen_ext32s_i64(dst, src);
3504 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3506 case A64_SHIFT_TYPE_ROR:
3508 tcg_gen_rotr_i64(dst, src, shift_amount);
3511 t0 = tcg_temp_new_i32();
3512 t1 = tcg_temp_new_i32();
3513 tcg_gen_extrl_i64_i32(t0, src);
3514 tcg_gen_extrl_i64_i32(t1, shift_amount);
3515 tcg_gen_rotr_i32(t0, t0, t1);
3516 tcg_gen_extu_i32_i64(dst, t0);
3517 tcg_temp_free_i32(t0);
3518 tcg_temp_free_i32(t1);
3522 assert(FALSE); /* all shift types should be handled */
3526 if (!sf) { /* zero extend final result */
3527 tcg_gen_ext32u_i64(dst, dst);
3531 /* Shift a TCGv src by immediate, put result in dst.
3532 * The shift amount must be in range (this should always be true as the
3533 * relevant instructions will UNDEF on bad shift immediates).
3535 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3536 enum a64_shift_type shift_type, unsigned int shift_i)
3538 assert(shift_i < (sf ? 64 : 32));
3541 tcg_gen_mov_i64(dst, src);
3543 TCGv_i64 shift_const;
3545 shift_const = tcg_const_i64(shift_i);
3546 shift_reg(dst, src, sf, shift_type, shift_const);
3547 tcg_temp_free_i64(shift_const);
3551 /* Logical (shifted register)
3552 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3553 * +----+-----+-----------+-------+---+------+--------+------+------+
3554 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
3555 * +----+-----+-----------+-------+---+------+--------+------+------+
3557 static void disas_logic_reg(DisasContext *s, uint32_t insn)
3559 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3560 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3562 sf = extract32(insn, 31, 1);
3563 opc = extract32(insn, 29, 2);
3564 shift_type = extract32(insn, 22, 2);
3565 invert = extract32(insn, 21, 1);
3566 rm = extract32(insn, 16, 5);
3567 shift_amount = extract32(insn, 10, 6);
3568 rn = extract32(insn, 5, 5);
3569 rd = extract32(insn, 0, 5);
3571 if (!sf && (shift_amount & (1 << 5))) {
3572 unallocated_encoding(s);
3576 tcg_rd = cpu_reg(s, rd);
3578 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3579 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
3580 * register-register MOV and MVN, so it is worth special casing.
3582 tcg_rm = cpu_reg(s, rm);
3584 tcg_gen_not_i64(tcg_rd, tcg_rm);
3586 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3590 tcg_gen_mov_i64(tcg_rd, tcg_rm);
3592 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3598 tcg_rm = read_cpu_reg(s, rm, sf);
3601 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3604 tcg_rn = cpu_reg(s, rn);
3606 switch (opc | (invert << 2)) {
3609 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3612 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3615 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3619 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3622 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3625 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3633 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3637 gen_logic_CC(sf, tcg_rd);
3642 * Add/subtract (extended register)
3644 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
3645 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3646 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
3647 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3649 * sf: 0 -> 32bit, 1 -> 64bit
3650 * op: 0 -> add , 1 -> sub
3653 * option: extension type (see DecodeRegExtend)
3654 * imm3: optional shift to Rm
3656 * Rd = Rn + LSL(extend(Rm), amount)
3658 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
3660 int rd = extract32(insn, 0, 5);
3661 int rn = extract32(insn, 5, 5);
3662 int imm3 = extract32(insn, 10, 3);
3663 int option = extract32(insn, 13, 3);
3664 int rm = extract32(insn, 16, 5);
3665 bool setflags = extract32(insn, 29, 1);
3666 bool sub_op = extract32(insn, 30, 1);
3667 bool sf = extract32(insn, 31, 1);
3669 TCGv_i64 tcg_rm, tcg_rn; /* temps */
3671 TCGv_i64 tcg_result;
3674 unallocated_encoding(s);
3678 /* non-flag setting ops may use SP */
3680 tcg_rd = cpu_reg_sp(s, rd);
3682 tcg_rd = cpu_reg(s, rd);
3684 tcg_rn = read_cpu_reg_sp(s, rn, sf);
3686 tcg_rm = read_cpu_reg(s, rm, sf);
3687 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
3689 tcg_result = tcg_temp_new_i64();
3693 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3695 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3699 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3701 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3706 tcg_gen_mov_i64(tcg_rd, tcg_result);
3708 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3711 tcg_temp_free_i64(tcg_result);
3715 * Add/subtract (shifted register)
3717 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3718 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3719 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
3720 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3722 * sf: 0 -> 32bit, 1 -> 64bit
3723 * op: 0 -> add , 1 -> sub
3725 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
3726 * imm6: Shift amount to apply to Rm before the add/sub
3728 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
3730 int rd = extract32(insn, 0, 5);
3731 int rn = extract32(insn, 5, 5);
3732 int imm6 = extract32(insn, 10, 6);
3733 int rm = extract32(insn, 16, 5);
3734 int shift_type = extract32(insn, 22, 2);
3735 bool setflags = extract32(insn, 29, 1);
3736 bool sub_op = extract32(insn, 30, 1);
3737 bool sf = extract32(insn, 31, 1);
3739 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3740 TCGv_i64 tcg_rn, tcg_rm;
3741 TCGv_i64 tcg_result;
3743 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
3744 unallocated_encoding(s);
3748 tcg_rn = read_cpu_reg(s, rn, sf);
3749 tcg_rm = read_cpu_reg(s, rm, sf);
3751 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
3753 tcg_result = tcg_temp_new_i64();
3757 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3759 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3763 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3765 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3770 tcg_gen_mov_i64(tcg_rd, tcg_result);
3772 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3775 tcg_temp_free_i64(tcg_result);
3778 /* Data-processing (3 source)
3780 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
3781 * +--+------+-----------+------+------+----+------+------+------+
3782 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
3783 * +--+------+-----------+------+------+----+------+------+------+
3785 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
3787 int rd = extract32(insn, 0, 5);
3788 int rn = extract32(insn, 5, 5);
3789 int ra = extract32(insn, 10, 5);
3790 int rm = extract32(insn, 16, 5);
3791 int op_id = (extract32(insn, 29, 3) << 4) |
3792 (extract32(insn, 21, 3) << 1) |
3793 extract32(insn, 15, 1);
3794 bool sf = extract32(insn, 31, 1);
3795 bool is_sub = extract32(op_id, 0, 1);
3796 bool is_high = extract32(op_id, 2, 1);
3797 bool is_signed = false;
3802 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
3804 case 0x42: /* SMADDL */
3805 case 0x43: /* SMSUBL */
3806 case 0x44: /* SMULH */
3809 case 0x0: /* MADD (32bit) */
3810 case 0x1: /* MSUB (32bit) */
3811 case 0x40: /* MADD (64bit) */
3812 case 0x41: /* MSUB (64bit) */
3813 case 0x4a: /* UMADDL */
3814 case 0x4b: /* UMSUBL */
3815 case 0x4c: /* UMULH */
3818 unallocated_encoding(s);
3823 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
3824 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3825 TCGv_i64 tcg_rn = cpu_reg(s, rn);
3826 TCGv_i64 tcg_rm = cpu_reg(s, rm);
3829 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3831 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3834 tcg_temp_free_i64(low_bits);
3838 tcg_op1 = tcg_temp_new_i64();
3839 tcg_op2 = tcg_temp_new_i64();
3840 tcg_tmp = tcg_temp_new_i64();
3843 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
3844 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
3847 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
3848 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
3850 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
3851 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
3855 if (ra == 31 && !is_sub) {
3856 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
3857 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
3859 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
3861 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3863 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3868 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
3871 tcg_temp_free_i64(tcg_op1);
3872 tcg_temp_free_i64(tcg_op2);
3873 tcg_temp_free_i64(tcg_tmp);
3876 /* Add/subtract (with carry)
3877 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
3878 * +--+--+--+------------------------+------+---------+------+-----+
3879 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
3880 * +--+--+--+------------------------+------+---------+------+-----+
3884 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
3886 unsigned int sf, op, setflags, rm, rn, rd;
3887 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
3889 if (extract32(insn, 10, 6) != 0) {
3890 unallocated_encoding(s);
3894 sf = extract32(insn, 31, 1);
3895 op = extract32(insn, 30, 1);
3896 setflags = extract32(insn, 29, 1);
3897 rm = extract32(insn, 16, 5);
3898 rn = extract32(insn, 5, 5);
3899 rd = extract32(insn, 0, 5);
3901 tcg_rd = cpu_reg(s, rd);
3902 tcg_rn = cpu_reg(s, rn);
3905 tcg_y = new_tmp_a64(s);
3906 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
3908 tcg_y = cpu_reg(s, rm);
3912 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
3914 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
3918 /* Conditional compare (immediate / register)
3919 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
3920 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3921 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
3922 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3925 static void disas_cc(DisasContext *s, uint32_t insn)
3927 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
3928 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
3929 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
3932 if (!extract32(insn, 29, 1)) {
3933 unallocated_encoding(s);
3936 if (insn & (1 << 10 | 1 << 4)) {
3937 unallocated_encoding(s);
3940 sf = extract32(insn, 31, 1);
3941 op = extract32(insn, 30, 1);
3942 is_imm = extract32(insn, 11, 1);
3943 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
3944 cond = extract32(insn, 12, 4);
3945 rn = extract32(insn, 5, 5);
3946 nzcv = extract32(insn, 0, 4);
3948 /* Set T0 = !COND. */
3949 tcg_t0 = tcg_temp_new_i32();
3950 arm_test_cc(&c, cond);
3951 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
3954 /* Load the arguments for the new comparison. */
3956 tcg_y = new_tmp_a64(s);
3957 tcg_gen_movi_i64(tcg_y, y);
3959 tcg_y = cpu_reg(s, y);
3961 tcg_rn = cpu_reg(s, rn);
3963 /* Set the flags for the new comparison. */
3964 tcg_tmp = tcg_temp_new_i64();
3966 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3968 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3970 tcg_temp_free_i64(tcg_tmp);
3972 /* If COND was false, force the flags to #nzcv. Compute two masks
3973 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
3974 * For tcg hosts that support ANDC, we can make do with just T1.
3975 * In either case, allow the tcg optimizer to delete any unused mask.
3977 tcg_t1 = tcg_temp_new_i32();
3978 tcg_t2 = tcg_temp_new_i32();
3979 tcg_gen_neg_i32(tcg_t1, tcg_t0);
3980 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
3982 if (nzcv & 8) { /* N */
3983 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
3985 if (TCG_TARGET_HAS_andc_i32) {
3986 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
3988 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
3991 if (nzcv & 4) { /* Z */
3992 if (TCG_TARGET_HAS_andc_i32) {
3993 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
3995 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
3998 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4000 if (nzcv & 2) { /* C */
4001 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4003 if (TCG_TARGET_HAS_andc_i32) {
4004 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4006 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4009 if (nzcv & 1) { /* V */
4010 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4012 if (TCG_TARGET_HAS_andc_i32) {
4013 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4015 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4018 tcg_temp_free_i32(tcg_t0);
4019 tcg_temp_free_i32(tcg_t1);
4020 tcg_temp_free_i32(tcg_t2);
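/* Illustrative host-side sketch of the mask trick above (this helper is
 * not part of the file): t1 is all-ones exactly when the condition
 * failed, t2 when it held, so each flag is selected branch-free.
 */
static inline uint32_t ccmp_flag_select_example(bool cond_held,
                                                uint32_t old_flag,
                                                uint32_t forced_flag)
{
    uint32_t t0 = cond_held ? 0 : 1;  /* T0 = !COND */
    uint32_t t1 = -t0;                /* COND ? 0 : -1 */
    uint32_t t2 = t0 - 1;             /* COND ? -1 : 0 */

    /* keep the old flag when COND held, else force the constant */
    return (old_flag & t2) | (forced_flag & t1);
}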
4023 /* Conditional select
4024 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
4025 * +----+----+---+-----------------+------+------+-----+------+------+
4026 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
4027 * +----+----+---+-----------------+------+------+-----+------+------+
4029 static void disas_cond_select(DisasContext *s, uint32_t insn)
4031 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4032 TCGv_i64 tcg_rd, zero;
4035 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4036 /* S == 1 or op2<1> == 1 */
4037 unallocated_encoding(s);
4040 sf = extract32(insn, 31, 1);
4041 else_inv = extract32(insn, 30, 1);
4042 rm = extract32(insn, 16, 5);
4043 cond = extract32(insn, 12, 4);
4044 else_inc = extract32(insn, 10, 1);
4045 rn = extract32(insn, 5, 5);
4046 rd = extract32(insn, 0, 5);
4048 tcg_rd = cpu_reg(s, rd);
4050 a64_test_cc(&c, cond);
4051 zero = tcg_const_i64(0);
4053 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
4055 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4057 tcg_gen_neg_i64(tcg_rd, tcg_rd);
4060 TCGv_i64 t_true = cpu_reg(s, rn);
4061 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4062 if (else_inv && else_inc) {
4063 tcg_gen_neg_i64(t_false, t_false);
4064 } else if (else_inv) {
4065 tcg_gen_not_i64(t_false, t_false);
4066 } else if (else_inc) {
4067 tcg_gen_addi_i64(t_false, t_false, 1);
4069 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4072 tcg_temp_free_i64(zero);
4076 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
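/* Worked example (illustrative): "cset w0, eq" assembles as
 * CSINC w0, wzr, wzr, ne, i.e. rn == rm == 31 with else_inc set, which
 * takes the setcond fast path above; "csetm x0, eq" is CSINV and
 * additionally negates the result. A general "csel x0, x1, x2, ge"
 * goes through movcond with t_true = x1 and t_false = x2.
 */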
4080 static void handle_clz(DisasContext *s, unsigned int sf,
4081 unsigned int rn, unsigned int rd)
4083 TCGv_i64 tcg_rd, tcg_rn;
4084 tcg_rd = cpu_reg(s, rd);
4085 tcg_rn = cpu_reg(s, rn);
4088 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4090 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4091 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4092 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4093 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4094 tcg_temp_free_i32(tcg_tmp32);
4098 static void handle_cls(DisasContext *s, unsigned int sf,
4099 unsigned int rn, unsigned int rd)
4101 TCGv_i64 tcg_rd, tcg_rn;
4102 tcg_rd = cpu_reg(s, rd);
4103 tcg_rn = cpu_reg(s, rn);
4106 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4108 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4109 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4110 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4111 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4112 tcg_temp_free_i32(tcg_tmp32);
4116 static void handle_rbit(DisasContext *s, unsigned int sf,
4117 unsigned int rn, unsigned int rd)
4119 TCGv_i64 tcg_rd, tcg_rn;
4120 tcg_rd = cpu_reg(s, rd);
4121 tcg_rn = cpu_reg(s, rn);
4124 gen_helper_rbit64(tcg_rd, tcg_rn);
4126 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4127 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4128 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4129 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4130 tcg_temp_free_i32(tcg_tmp32);
4134 /* REV with sf==1, opcode==3 ("REV64") */
4135 static void handle_rev64(DisasContext *s, unsigned int sf,
4136 unsigned int rn, unsigned int rd)
4139 unallocated_encoding(s);
4142 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4145 /* REV with sf==0, opcode==2
4146 * REV32 (sf==1, opcode==2)
4148 static void handle_rev32(DisasContext *s, unsigned int sf,
4149 unsigned int rn, unsigned int rd)
4151 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4154 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4155 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4157 /* bswap32_i64 requires zero high word */
4158 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4159 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4160 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4161 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4162 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4164 tcg_temp_free_i64(tcg_tmp);
4166 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4167 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4171 /* REV16 (opcode==1) */
4172 static void handle_rev16(DisasContext *s, unsigned int sf,
4173 unsigned int rn, unsigned int rd)
4175 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4176 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4177 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4178 TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
4180 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4181 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4182 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4183 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4184 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4186 tcg_temp_free_i64(mask);
4187 tcg_temp_free_i64(tcg_tmp);
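/* Worked example (illustrative): the shift/mask sequence above swaps
 * the two bytes inside every halfword. With sf=1 and
 * Xn = 0x1122334455667788:
 *   (Xn & mask) << 8    = 0x2200440066008800
 *   (Xn >> 8) & mask    = 0x0011003300550077
 *   or'd together       = 0x2211443366558877
 */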
4190 /* Data-processing (1 source)
4191 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4192 * +----+---+---+-----------------+---------+--------+------+------+
4193 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
4194 * +----+---+---+-----------------+---------+--------+------+------+
4196 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4198 unsigned int sf, opcode, rn, rd;
4200 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4201 unallocated_encoding(s);
4205 sf = extract32(insn, 31, 1);
4206 opcode = extract32(insn, 10, 6);
4207 rn = extract32(insn, 5, 5);
4208 rd = extract32(insn, 0, 5);
4212 handle_rbit(s, sf, rn, rd);
4215 handle_rev16(s, sf, rn, rd);
4218 handle_rev32(s, sf, rn, rd);
4221 handle_rev64(s, sf, rn, rd);
4224 handle_clz(s, sf, rn, rd);
4227 handle_cls(s, sf, rn, rd);
4232 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4233 unsigned int rm, unsigned int rn, unsigned int rd)
4235 TCGv_i64 tcg_n, tcg_m, tcg_rd;
4236 tcg_rd = cpu_reg(s, rd);
4238 if (!sf && is_signed) {
4239 tcg_n = new_tmp_a64(s);
4240 tcg_m = new_tmp_a64(s);
4241 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4242 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4244 tcg_n = read_cpu_reg(s, rn, sf);
4245 tcg_m = read_cpu_reg(s, rm, sf);
4249 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4251 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4254 if (!sf) { /* zero extend final result */
4255 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4259 /* LSLV, LSRV, ASRV, RORV */
4260 static void handle_shift_reg(DisasContext *s,
4261 enum a64_shift_type shift_type, unsigned int sf,
4262 unsigned int rm, unsigned int rn, unsigned int rd)
4264 TCGv_i64 tcg_shift = tcg_temp_new_i64();
4265 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4266 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4268 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4269 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4270 tcg_temp_free_i64(tcg_shift);
4273 /* CRC32[BHWX], CRC32C[BHWX] */
4274 static void handle_crc32(DisasContext *s,
4275 unsigned int sf, unsigned int sz, bool crc32c,
4276 unsigned int rm, unsigned int rn, unsigned int rd)
4278 TCGv_i64 tcg_acc, tcg_val;
4281 if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4282 || (sf == 1 && sz != 3)
4283 || (sf == 0 && sz == 3)) {
4284 unallocated_encoding(s);
4289 tcg_val = cpu_reg(s, rm);
4303 g_assert_not_reached();
4305 tcg_val = new_tmp_a64(s);
4306 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4309 tcg_acc = cpu_reg(s, rn);
4310 tcg_bytes = tcg_const_i32(1 << sz);
4313 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4315 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4318 tcg_temp_free_i32(tcg_bytes);
4321 /* Data-processing (2 source)
4322 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4323 * +----+---+---+-----------------+------+--------+------+------+
4324 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
4325 * +----+---+---+-----------------+------+--------+------+------+
4327 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4329 unsigned int sf, rm, opcode, rn, rd;
4330 sf = extract32(insn, 31, 1);
4331 rm = extract32(insn, 16, 5);
4332 opcode = extract32(insn, 10, 6);
4333 rn = extract32(insn, 5, 5);
4334 rd = extract32(insn, 0, 5);
4336 if (extract32(insn, 29, 1)) {
4337 unallocated_encoding(s);
4343 handle_div(s, false, sf, rm, rn, rd);
4346 handle_div(s, true, sf, rm, rn, rd);
4349 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4352 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4355 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4358 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4367 case 23: /* CRC32 */
4369 int sz = extract32(opcode, 0, 2);
4370 bool crc32c = extract32(opcode, 2, 1);
4371 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4375 unallocated_encoding(s);
4380 /* Data processing - register */
4381 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4383 switch (extract32(insn, 24, 5)) {
4384 case 0x0a: /* Logical (shifted register) */
4385 disas_logic_reg(s, insn);
4387 case 0x0b: /* Add/subtract */
4388 if (insn & (1 << 21)) { /* (extended register) */
4389 disas_add_sub_ext_reg(s, insn);
4391 disas_add_sub_reg(s, insn);
4394 case 0x1b: /* Data-processing (3 source) */
4395 disas_data_proc_3src(s, insn);
4398 switch (extract32(insn, 21, 3)) {
4399 case 0x0: /* Add/subtract (with carry) */
4400 disas_adc_sbc(s, insn);
4402 case 0x2: /* Conditional compare */
4403 disas_cc(s, insn); /* both imm and reg forms */
4405 case 0x4: /* Conditional select */
4406 disas_cond_select(s, insn);
4408 case 0x6: /* Data-processing */
4409 if (insn & (1 << 30)) { /* (1 source) */
4410 disas_data_proc_1src(s, insn);
4411 } else { /* (2 source) */
4412 disas_data_proc_2src(s, insn);
4416 unallocated_encoding(s);
4421 unallocated_encoding(s);
4426 static void handle_fp_compare(DisasContext *s, bool is_double,
4427 unsigned int rn, unsigned int rm,
4428 bool cmp_with_zero, bool signal_all_nans)
4430 TCGv_i64 tcg_flags = tcg_temp_new_i64();
4431 TCGv_ptr fpst = get_fpstatus_ptr(false);
4434 TCGv_i64 tcg_vn, tcg_vm;
4436 tcg_vn = read_fp_dreg(s, rn);
4437 if (cmp_with_zero) {
4438 tcg_vm = tcg_const_i64(0);
4440 tcg_vm = read_fp_dreg(s, rm);
4442 if (signal_all_nans) {
4443 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4445 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4447 tcg_temp_free_i64(tcg_vn);
4448 tcg_temp_free_i64(tcg_vm);
4450 TCGv_i32 tcg_vn, tcg_vm;
4452 tcg_vn = read_fp_sreg(s, rn);
4453 if (cmp_with_zero) {
4454 tcg_vm = tcg_const_i32(0);
4456 tcg_vm = read_fp_sreg(s, rm);
4458 if (signal_all_nans) {
4459 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4461 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4463 tcg_temp_free_i32(tcg_vn);
4464 tcg_temp_free_i32(tcg_vm);
4467 tcg_temp_free_ptr(fpst);
4469 gen_set_nzcv(tcg_flags);
4471 tcg_temp_free_i64(tcg_flags);
4474 /* Floating point compare
4475 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
4476 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4477 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
4478 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4480 static void disas_fp_compare(DisasContext *s, uint32_t insn)
4482 unsigned int mos, type, rm, op, rn, opc, op2r;
4484 mos = extract32(insn, 29, 3);
4485 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4486 rm = extract32(insn, 16, 5);
4487 op = extract32(insn, 14, 2);
4488 rn = extract32(insn, 5, 5);
4489 opc = extract32(insn, 3, 2);
4490 op2r = extract32(insn, 0, 3);
4492 if (mos || op || op2r || type > 1) {
4493 unallocated_encoding(s);
4497 if (!fp_access_check(s)) {
4501 handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
4504 /* Floating point conditional compare
4505 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4506 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4507 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
4508 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4510 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
4512 unsigned int mos, type, rm, cond, rn, op, nzcv;
4514 TCGLabel *label_continue = NULL;
4516 mos = extract32(insn, 29, 3);
4517 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4518 rm = extract32(insn, 16, 5);
4519 cond = extract32(insn, 12, 4);
4520 rn = extract32(insn, 5, 5);
4521 op = extract32(insn, 4, 1);
4522 nzcv = extract32(insn, 0, 4);
4524 if (mos || type > 1) {
4525 unallocated_encoding(s);
4529 if (!fp_access_check(s)) {
4533 if (cond < 0x0e) { /* not always */
4534 TCGLabel *label_match = gen_new_label();
4535 label_continue = gen_new_label();
4536 arm_gen_test_cc(cond, label_match);
4538 tcg_flags = tcg_const_i64(nzcv << 28);
4539 gen_set_nzcv(tcg_flags);
4540 tcg_temp_free_i64(tcg_flags);
4541 tcg_gen_br(label_continue);
4542 gen_set_label(label_match);
4545 handle_fp_compare(s, type, rn, rm, false, op);
4548 gen_set_label(label_continue);
4552 /* Floating point conditional select
4553 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
4554 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4555 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
4556 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4558 static void disas_fp_csel(DisasContext *s, uint32_t insn)
4560 unsigned int mos, type, rm, cond, rn, rd;
4561 TCGv_i64 t_true, t_false, t_zero;
4564 mos = extract32(insn, 29, 3);
4565 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4566 rm = extract32(insn, 16, 5);
4567 cond = extract32(insn, 12, 4);
4568 rn = extract32(insn, 5, 5);
4569 rd = extract32(insn, 0, 5);
4571 if (mos || type > 1) {
4572 unallocated_encoding(s);
4576 if (!fp_access_check(s)) {
4580 /* Zero extend sreg inputs to 64 bits now. */
4581 t_true = tcg_temp_new_i64();
4582 t_false = tcg_temp_new_i64();
4583 read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
4584 read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
4586 a64_test_cc(&c, cond);
4587 t_zero = tcg_const_i64(0);
4588 tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
4589 tcg_temp_free_i64(t_zero);
4590 tcg_temp_free_i64(t_false);
4593 /* Note that sregs write back zeros to the high bits,
4594 and we've already done the zero-extension. */
4595 write_fp_dreg(s, rd, t_true);
4596 tcg_temp_free_i64(t_true);
4599 /* Floating-point data-processing (1 source) - half precision */
4600 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
4602 TCGv_ptr fpst = NULL;
4603 TCGv_i32 tcg_op = tcg_temp_new_i32();
4604 TCGv_i32 tcg_res = tcg_temp_new_i32();
4606 read_vec_element_i32(s, tcg_op, rn, 0, MO_16);
4609 case 0x0: /* FMOV */
4610 tcg_gen_mov_i32(tcg_res, tcg_op);
4612 case 0x1: /* FABS */
4613 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
4615 case 0x2: /* FNEG */
4616 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
4618 case 0x3: /* FSQRT */
4619 gen_helper_sqrt_f16(tcg_res, tcg_op, cpu_env);
4621 case 0x8: /* FRINTN */
4622 case 0x9: /* FRINTP */
4623 case 0xa: /* FRINTM */
4624 case 0xb: /* FRINTZ */
4625 case 0xc: /* FRINTA */
4627 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4628 fpst = get_fpstatus_ptr(true);
4630 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4631 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
4633 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4634 tcg_temp_free_i32(tcg_rmode);
4637 case 0xe: /* FRINTX */
4638 fpst = get_fpstatus_ptr(true);
4639 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
4641 case 0xf: /* FRINTI */
4642 fpst = get_fpstatus_ptr(true);
4643 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
4649 write_fp_sreg(s, rd, tcg_res);
4652 tcg_temp_free_ptr(fpst);
4654 tcg_temp_free_i32(tcg_op);
4655 tcg_temp_free_i32(tcg_res);
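/* Worked example (illustrative): for IEEE half precision the sign is
 * bit 15, so FABS and FNEG above are pure integer ops: AND with 0x7fff
 * clears the sign and XOR with 0x8000 flips it, e.g.
 * 0xc200 (-3.0) & 0x7fff = 0x4200 (3.0).
 */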
4658 /* Floating-point data-processing (1 source) - single precision */
4659 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
4665 fpst = get_fpstatus_ptr(false);
4666 tcg_op = read_fp_sreg(s, rn);
4667 tcg_res = tcg_temp_new_i32();
4670 case 0x0: /* FMOV */
4671 tcg_gen_mov_i32(tcg_res, tcg_op);
4673 case 0x1: /* FABS */
4674 gen_helper_vfp_abss(tcg_res, tcg_op);
4676 case 0x2: /* FNEG */
4677 gen_helper_vfp_negs(tcg_res, tcg_op);
4679 case 0x3: /* FSQRT */
4680 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
4682 case 0x8: /* FRINTN */
4683 case 0x9: /* FRINTP */
4684 case 0xa: /* FRINTM */
4685 case 0xb: /* FRINTZ */
4686 case 0xc: /* FRINTA */
4688 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4690 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4691 gen_helper_rints(tcg_res, tcg_op, fpst);
4693 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4694 tcg_temp_free_i32(tcg_rmode);
4697 case 0xe: /* FRINTX */
4698 gen_helper_rints_exact(tcg_res, tcg_op, fpst);
4700 case 0xf: /* FRINTI */
4701 gen_helper_rints(tcg_res, tcg_op, fpst);
4707 write_fp_sreg(s, rd, tcg_res);
4709 tcg_temp_free_ptr(fpst);
4710 tcg_temp_free_i32(tcg_op);
4711 tcg_temp_free_i32(tcg_res);
4714 /* Floating-point data-processing (1 source) - double precision */
4715 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
4722 case 0x0: /* FMOV */
4723 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
4727 fpst = get_fpstatus_ptr(false);
4728 tcg_op = read_fp_dreg(s, rn);
4729 tcg_res = tcg_temp_new_i64();
4732 case 0x1: /* FABS */
4733 gen_helper_vfp_absd(tcg_res, tcg_op);
4735 case 0x2: /* FNEG */
4736 gen_helper_vfp_negd(tcg_res, tcg_op);
4738 case 0x3: /* FSQRT */
4739 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
4741 case 0x8: /* FRINTN */
4742 case 0x9: /* FRINTP */
4743 case 0xa: /* FRINTM */
4744 case 0xb: /* FRINTZ */
4745 case 0xc: /* FRINTA */
4747 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4749 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4750 gen_helper_rintd(tcg_res, tcg_op, fpst);
4752 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4753 tcg_temp_free_i32(tcg_rmode);
4756 case 0xe: /* FRINTX */
4757 gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
4759 case 0xf: /* FRINTI */
4760 gen_helper_rintd(tcg_res, tcg_op, fpst);
4766 write_fp_dreg(s, rd, tcg_res);
4768 tcg_temp_free_ptr(fpst);
4769 tcg_temp_free_i64(tcg_op);
4770 tcg_temp_free_i64(tcg_res);
4773 static void handle_fp_fcvt(DisasContext *s, int opcode,
4774 int rd, int rn, int dtype, int ntype)
4779 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4781 /* Single to double */
4782 TCGv_i64 tcg_rd = tcg_temp_new_i64();
4783 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
4784 write_fp_dreg(s, rd, tcg_rd);
4785 tcg_temp_free_i64(tcg_rd);
4787 /* Single to half */
4788 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4789 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
4790 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4791 write_fp_sreg(s, rd, tcg_rd);
4792 tcg_temp_free_i32(tcg_rd);
4794 tcg_temp_free_i32(tcg_rn);
4799 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
4800 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4802 /* Double to single */
4803 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
4805 /* Double to half */
4806 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
4807 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
4809 write_fp_sreg(s, rd, tcg_rd);
4810 tcg_temp_free_i32(tcg_rd);
4811 tcg_temp_free_i64(tcg_rn);
4816 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
4817 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
4819 /* Half to single */
4820 TCGv_i32 tcg_rd = tcg_temp_new_i32();
4821 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
4822 write_fp_sreg(s, rd, tcg_rd);
4823 tcg_temp_free_i32(tcg_rd);
4825 /* Half to double */
4826 TCGv_i64 tcg_rd = tcg_temp_new_i64();
4827 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
4828 write_fp_dreg(s, rd, tcg_rd);
4829 tcg_temp_free_i64(tcg_rd);
4831 tcg_temp_free_i32(tcg_rn);
4839 /* Floating point data-processing (1 source)
4840 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
4841 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4842 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
4843 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4845 static void disas_fp_1src(DisasContext *s, uint32_t insn)
4847 int type = extract32(insn, 22, 2);
4848 int opcode = extract32(insn, 15, 6);
4849 int rn = extract32(insn, 5, 5);
4850 int rd = extract32(insn, 0, 5);
4853 case 0x4: case 0x5: case 0x7:
4855 /* FCVT between half, single and double precision */
4856 int dtype = extract32(opcode, 0, 2);
4857 if (type == 2 || dtype == type) {
4858 unallocated_encoding(s);
4861 if (!fp_access_check(s)) {
4865 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
4871 /* 32-to-32 and 64-to-64 ops */
4874 if (!fp_access_check(s)) {
4878 handle_fp_1src_single(s, opcode, rd, rn);
4881 if (!fp_access_check(s)) {
4885 handle_fp_1src_double(s, opcode, rd, rn);
4888 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
4889 unallocated_encoding(s);
4893 if (!fp_access_check(s)) {
4897 handle_fp_1src_half(s, opcode, rd, rn);
4900 unallocated_encoding(s);
4904 unallocated_encoding(s);
4909 /* Floating-point data-processing (2 source) - single precision */
4910 static void handle_fp_2src_single(DisasContext *s, int opcode,
4911 int rd, int rn, int rm)
4918 tcg_res = tcg_temp_new_i32();
4919 fpst = get_fpstatus_ptr(false);
4920 tcg_op1 = read_fp_sreg(s, rn);
4921 tcg_op2 = read_fp_sreg(s, rm);
4924 case 0x0: /* FMUL */
4925 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4927 case 0x1: /* FDIV */
4928 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
4930 case 0x2: /* FADD */
4931 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
4933 case 0x3: /* FSUB */
4934 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
4936 case 0x4: /* FMAX */
4937 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
4939 case 0x5: /* FMIN */
4940 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
4942 case 0x6: /* FMAXNM */
4943 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
4945 case 0x7: /* FMINNM */
4946 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
4948 case 0x8: /* FNMUL */
4949 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4950 gen_helper_vfp_negs(tcg_res, tcg_res);
4954 write_fp_sreg(s, rd, tcg_res);
4956 tcg_temp_free_ptr(fpst);
4957 tcg_temp_free_i32(tcg_op1);
4958 tcg_temp_free_i32(tcg_op2);
4959 tcg_temp_free_i32(tcg_res);
4962 /* Floating-point data-processing (2 source) - double precision */
4963 static void handle_fp_2src_double(DisasContext *s, int opcode,
4964 int rd, int rn, int rm)
4971 tcg_res = tcg_temp_new_i64();
4972 fpst = get_fpstatus_ptr(false);
4973 tcg_op1 = read_fp_dreg(s, rn);
4974 tcg_op2 = read_fp_dreg(s, rm);
4977 case 0x0: /* FMUL */
4978 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4980 case 0x1: /* FDIV */
4981 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
4983 case 0x2: /* FADD */
4984 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
4986 case 0x3: /* FSUB */
4987 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
4989 case 0x4: /* FMAX */
4990 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
4992 case 0x5: /* FMIN */
4993 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
4995 case 0x6: /* FMAXNM */
4996 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4998 case 0x7: /* FMINNM */
4999 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5001 case 0x8: /* FNMUL */
5002 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5003 gen_helper_vfp_negd(tcg_res, tcg_res);
5007 write_fp_dreg(s, rd, tcg_res);
5009 tcg_temp_free_ptr(fpst);
5010 tcg_temp_free_i64(tcg_op1);
5011 tcg_temp_free_i64(tcg_op2);
5012 tcg_temp_free_i64(tcg_res);
5015 /* Floating point data-processing (2 source)
5016 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
5017 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5018 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
5019 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5021 static void disas_fp_2src(DisasContext *s, uint32_t insn)
5023 int type = extract32(insn, 22, 2);
5024 int rd = extract32(insn, 0, 5);
5025 int rn = extract32(insn, 5, 5);
5026 int rm = extract32(insn, 16, 5);
5027 int opcode = extract32(insn, 12, 4);
5030 unallocated_encoding(s);
5036 if (!fp_access_check(s)) {
5039 handle_fp_2src_single(s, opcode, rd, rn, rm);
5042 if (!fp_access_check(s)) {
5045 handle_fp_2src_double(s, opcode, rd, rn, rm);
5048 unallocated_encoding(s);
5052 /* Floating-point data-processing (3 source) - single precision */
5053 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
5054 int rd, int rn, int rm, int ra)
5056 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
5057 TCGv_i32 tcg_res = tcg_temp_new_i32();
5058 TCGv_ptr fpst = get_fpstatus_ptr(false);
5060 tcg_op1 = read_fp_sreg(s, rn);
5061 tcg_op2 = read_fp_sreg(s, rm);
5062 tcg_op3 = read_fp_sreg(s, ra);
5064 /* These are fused multiply-add, and must be done as one
5065 * floating point operation with no rounding between the
5066 * multiplication and addition steps.
5067 * NB that doing the negations here as separate steps is
 * correct: an input NaN should come out with its sign bit
 * flipped if it is a negated input.
 */
5072 gen_helper_vfp_negs(tcg_op3, tcg_op3);
5076 gen_helper_vfp_negs(tcg_op1, tcg_op1);
5079 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5081 write_fp_sreg(s, rd, tcg_res);
5083 tcg_temp_free_ptr(fpst);
5084 tcg_temp_free_i32(tcg_op1);
5085 tcg_temp_free_i32(tcg_op2);
5086 tcg_temp_free_i32(tcg_op3);
5087 tcg_temp_free_i32(tcg_res);
5090 /* Floating-point data-processing (3 source) - double precision */
5091 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
5092 int rd, int rn, int rm, int ra)
5094 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
5095 TCGv_i64 tcg_res = tcg_temp_new_i64();
5096 TCGv_ptr fpst = get_fpstatus_ptr(false);
5098 tcg_op1 = read_fp_dreg(s, rn);
5099 tcg_op2 = read_fp_dreg(s, rm);
5100 tcg_op3 = read_fp_dreg(s, ra);
5102 /* These are fused multiply-add, and must be done as one
5103 * floating point operation with no rounding between the
5104 * multiplication and addition steps.
5105 * NB that doing the negations here as separate steps is
 * correct: an input NaN should come out with its sign bit
 * flipped if it is a negated input.
 */
5110 gen_helper_vfp_negd(tcg_op3, tcg_op3);
5114 gen_helper_vfp_negd(tcg_op1, tcg_op1);
5117 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5119 write_fp_dreg(s, rd, tcg_res);
5121 tcg_temp_free_ptr(fpst);
5122 tcg_temp_free_i64(tcg_op1);
5123 tcg_temp_free_i64(tcg_op2);
5124 tcg_temp_free_i64(tcg_op3);
5125 tcg_temp_free_i64(tcg_res);
5128 /* Floating point data-processing (3 source)
5129 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
5130 * +---+---+---+-----------+------+----+------+----+------+------+------+
5131 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
5132 * +---+---+---+-----------+------+----+------+----+------+------+------+
5134 static void disas_fp_3src(DisasContext *s, uint32_t insn)
5136 int type = extract32(insn, 22, 2);
5137 int rd = extract32(insn, 0, 5);
5138 int rn = extract32(insn, 5, 5);
5139 int ra = extract32(insn, 10, 5);
5140 int rm = extract32(insn, 16, 5);
5141 bool o0 = extract32(insn, 15, 1);
5142 bool o1 = extract32(insn, 21, 1);
5146 if (!fp_access_check(s)) {
5149 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
5152 if (!fp_access_check(s)) {
5155 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
5158 unallocated_encoding(s);
5162 /* The imm8 encodes the sign bit, enough bits to represent an exponent in
5163 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
5164 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
5166 static uint64_t vfp_expand_imm(int size, uint8_t imm8)
5172 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5173 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5174 extract32(imm8, 0, 6);
5178 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5179 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5180 (extract32(imm8, 0, 6) << 3);
5184 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5185 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
5186 (extract32(imm8, 0, 6) << 6);
5189 g_assert_not_reached();
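/* Worked example (ours, not from the ARM ARM text): for MO_32 with
 * imm8 == 0x70 (sign 0, bit 6 set, low bits 0b110000) the expression
 * above gives imm = 0x3e00 | (0x30 << 3) = 0x3f80, which shifted into
 * the high half of the word is 0x3f800000, the IEEE-754 single-precision
 * encoding of 1.0; imm8 == 0xf0 additionally sets the sign bit, -1.0.
 */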
5194 /* Floating point immediate
5195 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
5196 * +---+---+---+-----------+------+---+------------+-------+------+------+
5197 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
5198 * +---+---+---+-----------+------+---+------------+-------+------+------+
5200 static void disas_fp_imm(DisasContext *s, uint32_t insn)
5202 int rd = extract32(insn, 0, 5);
5203 int imm8 = extract32(insn, 13, 8);
5204 int is_double = extract32(insn, 22, 2);
5208 if (is_double > 1) {
5209 unallocated_encoding(s);
5213 if (!fp_access_check(s)) {
5217 imm = vfp_expand_imm(MO_32 + is_double, imm8);
5219 tcg_res = tcg_const_i64(imm);
5220 write_fp_dreg(s, rd, tcg_res);
5221 tcg_temp_free_i64(tcg_res);
5224 /* Handle floating point <=> fixed point conversions. Note that we can
5225 * also deal with fp <=> integer conversions as a special case (scale == 64)
5226 * OPTME: consider handling that special case specially or at least skipping
5227 * the call to scalbn in the helpers for zero shifts.
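/* Illustrative decode: SCVTF Sd, Wn, #16 arrives here with scale == 48,
 * so tcg_shift below is 64 - 48 == 16 and the helper computes
 * Wn * 2^-16; the pure integer form is the scale == 64 (zero shift)
 * case mentioned above.
 */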
5229 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
5230 bool itof, int rmode, int scale, int sf, int type)
5232 bool is_signed = !(opcode & 1);
5233 bool is_double = type;
5234 TCGv_ptr tcg_fpstatus;
5237 tcg_fpstatus = get_fpstatus_ptr(false);
5239 tcg_shift = tcg_const_i32(64 - scale);
5242 TCGv_i64 tcg_int = cpu_reg(s, rn);
5244 TCGv_i64 tcg_extend = new_tmp_a64(s);
5247 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
5249 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
5252 tcg_int = tcg_extend;
5256 TCGv_i64 tcg_double = tcg_temp_new_i64();
5258 gen_helper_vfp_sqtod(tcg_double, tcg_int,
5259 tcg_shift, tcg_fpstatus);
5261 gen_helper_vfp_uqtod(tcg_double, tcg_int,
5262 tcg_shift, tcg_fpstatus);
5264 write_fp_dreg(s, rd, tcg_double);
5265 tcg_temp_free_i64(tcg_double);
5267 TCGv_i32 tcg_single = tcg_temp_new_i32();
5269 gen_helper_vfp_sqtos(tcg_single, tcg_int,
5270 tcg_shift, tcg_fpstatus);
5272 gen_helper_vfp_uqtos(tcg_single, tcg_int,
5273 tcg_shift, tcg_fpstatus);
5275 write_fp_sreg(s, rd, tcg_single);
5276 tcg_temp_free_i32(tcg_single);
5279 TCGv_i64 tcg_int = cpu_reg(s, rd);
5282 if (extract32(opcode, 2, 1)) {
5283 /* There are too many rounding modes to all fit into rmode,
5284 * so FCVTA[US] is a special case.
5286 rmode = FPROUNDING_TIEAWAY;
5289 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
5291 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5294 TCGv_i64 tcg_double = read_fp_dreg(s, rn);
5297 gen_helper_vfp_tosld(tcg_int, tcg_double,
5298 tcg_shift, tcg_fpstatus);
5300 gen_helper_vfp_tosqd(tcg_int, tcg_double,
5301 tcg_shift, tcg_fpstatus);
5305 gen_helper_vfp_tould(tcg_int, tcg_double,
5306 tcg_shift, tcg_fpstatus);
5308 gen_helper_vfp_touqd(tcg_int, tcg_double,
5309 tcg_shift, tcg_fpstatus);
5312 tcg_temp_free_i64(tcg_double);
5314 TCGv_i32 tcg_single = read_fp_sreg(s, rn);
5317 gen_helper_vfp_tosqs(tcg_int, tcg_single,
5318 tcg_shift, tcg_fpstatus);
5320 gen_helper_vfp_touqs(tcg_int, tcg_single,
5321 tcg_shift, tcg_fpstatus);
5324 TCGv_i32 tcg_dest = tcg_temp_new_i32();
5326 gen_helper_vfp_tosls(tcg_dest, tcg_single,
5327 tcg_shift, tcg_fpstatus);
5329 gen_helper_vfp_touls(tcg_dest, tcg_single,
5330 tcg_shift, tcg_fpstatus);
5332 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5333 tcg_temp_free_i32(tcg_dest);
5335 tcg_temp_free_i32(tcg_single);
5338 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5339 tcg_temp_free_i32(tcg_rmode);
5342 tcg_gen_ext32u_i64(tcg_int, tcg_int);
5346 tcg_temp_free_ptr(tcg_fpstatus);
5347 tcg_temp_free_i32(tcg_shift);
5350 /* Floating point <-> fixed point conversions
5351 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5352 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5353 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
5354 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5356 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5358 int rd = extract32(insn, 0, 5);
5359 int rn = extract32(insn, 5, 5);
5360 int scale = extract32(insn, 10, 6);
5361 int opcode = extract32(insn, 16, 3);
5362 int rmode = extract32(insn, 19, 2);
5363 int type = extract32(insn, 22, 2);
5364 bool sbit = extract32(insn, 29, 1);
5365 bool sf = extract32(insn, 31, 1);
5368 if (sbit || (type > 1)
5369 || (!sf && scale < 32)) {
5370 unallocated_encoding(s);
5374 switch ((rmode << 3) | opcode) {
5375 case 0x2: /* SCVTF */
5376 case 0x3: /* UCVTF */
5379 case 0x18: /* FCVTZS */
5380 case 0x19: /* FCVTZU */
5384 unallocated_encoding(s);
5388 if (!fp_access_check(s)) {
5392 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5395 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
5397 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
5398 * without conversion.
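/* For instance (illustrative), FMOV Xd, Vn.D[1] is the sf == 1,
 * type == 2, rmode == 1 form: it moves the high 64 bits of the vector
 * register via fp_reg_hi_offset() without touching the low half.
 */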
5402 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5408 TCGv_i64 tmp = tcg_temp_new_i64();
5409 tcg_gen_ext32u_i64(tmp, tcg_rn);
5410 tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
5411 tcg_gen_movi_i64(tmp, 0);
5412 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5413 tcg_temp_free_i64(tmp);
5419 TCGv_i64 tmp = tcg_const_i64(0);
5420 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
5421 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
5422 tcg_temp_free_i64(tmp);
5426 /* 64 bit to top half. */
5427 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
5431 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5436 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
5440 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
5443 /* 64 bits from top half */
5444 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
5450 /* Floating point <-> integer conversions
5451 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5452 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5453 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
5454 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5456 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
5458 int rd = extract32(insn, 0, 5);
5459 int rn = extract32(insn, 5, 5);
5460 int opcode = extract32(insn, 16, 3);
5461 int rmode = extract32(insn, 19, 2);
5462 int type = extract32(insn, 22, 2);
5463 bool sbit = extract32(insn, 29, 1);
5464 bool sf = extract32(insn, 31, 1);
5467 unallocated_encoding(s);
5473 bool itof = opcode & 1;
5476 unallocated_encoding(s);
5480 switch (sf << 3 | type << 1 | rmode) {
5481 case 0x0: /* 32 bit */
5482 case 0xa: /* 64 bit */
5483 case 0xd: /* 64 bit to top half of quad */
5486 /* all other sf/type/rmode combinations are invalid */
5487 unallocated_encoding(s);
5491 if (!fp_access_check(s)) {
5494 handle_fmov(s, rd, rn, type, itof);
5496 /* actual FP conversions */
5497 bool itof = extract32(opcode, 1, 1);
5499 if (type > 1 || (rmode != 0 && opcode > 1)) {
5500 unallocated_encoding(s);
5504 if (!fp_access_check(s)) {
5507 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
5511 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
5512 * 31 30 29 28 25 24 0
5513 * +---+---+---+---------+-----------------------------+
5514 * | | 0 | | 1 1 1 1 | |
5515 * +---+---+---+---------+-----------------------------+
5517 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
5519 if (extract32(insn, 24, 1)) {
5520 /* Floating point data-processing (3 source) */
5521 disas_fp_3src(s, insn);
5522 } else if (extract32(insn, 21, 1) == 0) {
5523 /* Floating point to fixed point conversions */
5524 disas_fp_fixed_conv(s, insn);
5526 switch (extract32(insn, 10, 2)) {
5528 /* Floating point conditional compare */
5529 disas_fp_ccomp(s, insn);
5532 /* Floating point data-processing (2 source) */
5533 disas_fp_2src(s, insn);
5536 /* Floating point conditional select */
5537 disas_fp_csel(s, insn);
5540 switch (ctz32(extract32(insn, 12, 4))) {
5541 case 0: /* [15:12] == xxx1 */
5542 /* Floating point immediate */
5543 disas_fp_imm(s, insn);
5545 case 1: /* [15:12] == xx10 */
5546 /* Floating point compare */
5547 disas_fp_compare(s, insn);
5549 case 2: /* [15:12] == x100 */
5550 /* Floating point data-processing (1 source) */
5551 disas_fp_1src(s, insn);
5553 case 3: /* [15:12] == 1000 */
5554 unallocated_encoding(s);
5556 default: /* [15:12] == 0000 */
5557 /* Floating point <-> integer conversions */
5558 disas_fp_int_conv(s, insn);
5566 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
5569 /* Extract 64 bits from the middle of two concatenated 64 bit
5570 * vector register slices left:right. The extracted bits start
5571 * at 'pos' bits into the right (least significant) side.
 * We return the result in tcg_right, and guarantee not to
 * clobber tcg_left.
 */
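/* e.g. for pos == 24 the sequence below computes
 *   tcg_right = (right >> 24) | (left << 40),
 * so the low 40 bits come from the top of tcg_right and the high 24
 * bits from the bottom of tcg_left.
 */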
5575 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5576 assert(pos > 0 && pos < 64);
5578 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
5579 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
5580 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
5582 tcg_temp_free_i64(tcg_tmp);
/* EXT
 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
5587 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5588 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
5589 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5591 static void disas_simd_ext(DisasContext *s, uint32_t insn)
5593 int is_q = extract32(insn, 30, 1);
5594 int op2 = extract32(insn, 22, 2);
5595 int imm4 = extract32(insn, 11, 4);
5596 int rm = extract32(insn, 16, 5);
5597 int rn = extract32(insn, 5, 5);
5598 int rd = extract32(insn, 0, 5);
5599 int pos = imm4 << 3;
5600 TCGv_i64 tcg_resl, tcg_resh;
5602 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
5603 unallocated_encoding(s);
5607 if (!fp_access_check(s)) {
5611 tcg_resh = tcg_temp_new_i64();
5612 tcg_resl = tcg_temp_new_i64();
5614 /* Vd gets bits starting at pos bits into Vm:Vn. This is
5615 * either extracting 128 bits from a 128:128 concatenation, or
5616 * extracting 64 bits from a 64:64 concatenation.
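/* As an illustrative case: EXT Vd.8B, Vn.8B, Vm.8B, #3 has pos == 24
 * and yields bytes 3..7 of Vn followed by bytes 0..2 of Vm, i.e. bytes
 * [10:3] of the Vm:Vn concatenation.
 */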
5619 read_vec_element(s, tcg_resl, rn, 0, MO_64);
5621 read_vec_element(s, tcg_resh, rm, 0, MO_64);
5622 do_ext64(s, tcg_resh, tcg_resl, pos);
5624 tcg_gen_movi_i64(tcg_resh, 0);
5631 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
5632 EltPosns *elt = eltposns;
5639 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
5641 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
5644 do_ext64(s, tcg_resh, tcg_resl, pos);
5645 tcg_hh = tcg_temp_new_i64();
5646 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
5647 do_ext64(s, tcg_hh, tcg_resh, pos);
5648 tcg_temp_free_i64(tcg_hh);
5652 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5653 tcg_temp_free_i64(tcg_resl);
5654 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5655 tcg_temp_free_i64(tcg_resh);
/* TBL/TBX
 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
5660 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5661 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
5662 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5664 static void disas_simd_tb(DisasContext *s, uint32_t insn)
5666 int op2 = extract32(insn, 22, 2);
5667 int is_q = extract32(insn, 30, 1);
5668 int rm = extract32(insn, 16, 5);
5669 int rn = extract32(insn, 5, 5);
5670 int rd = extract32(insn, 0, 5);
5671 int is_tblx = extract32(insn, 12, 1);
5672 int len = extract32(insn, 13, 2);
5673 TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
5674 TCGv_i32 tcg_regno, tcg_numregs;
5677 unallocated_encoding(s);
5681 if (!fp_access_check(s)) {
5685 /* This does a table lookup: for every byte element in the input
5686 * we index into a table formed from up to four vector registers,
5687 * and then the output is the result of the lookups. Our helper
 * function does the lookup operation for a single 64 bit part of
 * the input.
 */
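/* For example (illustrative), TBL Vd.8B, { Vn.16B, V(n+1).16B }, Vm.8B
 * has len == 1, so each byte of Vm indexes the 32 bytes of the two
 * table registers; an index >= 32 produces 0 for TBL, while TBX leaves
 * the destination byte unchanged (which is why rd is read back below).
 */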
5691 tcg_resl = tcg_temp_new_i64();
5692 tcg_resh = tcg_temp_new_i64();
5695 read_vec_element(s, tcg_resl, rd, 0, MO_64);
5697 tcg_gen_movi_i64(tcg_resl, 0);
5699 if (is_tblx && is_q) {
5700 read_vec_element(s, tcg_resh, rd, 1, MO_64);
5702 tcg_gen_movi_i64(tcg_resh, 0);
5705 tcg_idx = tcg_temp_new_i64();
5706 tcg_regno = tcg_const_i32(rn);
5707 tcg_numregs = tcg_const_i32(len + 1);
5708 read_vec_element(s, tcg_idx, rm, 0, MO_64);
5709 gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
5710 tcg_regno, tcg_numregs);
5712 read_vec_element(s, tcg_idx, rm, 1, MO_64);
5713 gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
5714 tcg_regno, tcg_numregs);
5716 tcg_temp_free_i64(tcg_idx);
5717 tcg_temp_free_i32(tcg_regno);
5718 tcg_temp_free_i32(tcg_numregs);
5720 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5721 tcg_temp_free_i64(tcg_resl);
5722 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5723 tcg_temp_free_i64(tcg_resh);
/* ZIP/UZP/TRN
 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
5728 * +---+---+-------------+------+---+------+---+------------------+------+
5729 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
5730 * +---+---+-------------+------+---+------+---+------------------+------+
5732 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
5734 int rd = extract32(insn, 0, 5);
5735 int rn = extract32(insn, 5, 5);
5736 int rm = extract32(insn, 16, 5);
5737 int size = extract32(insn, 22, 2);
5738 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
5739 * bit 2 indicates 1 vs 2 variant of the insn.
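/* Worked example (illustrative), for 8B vectors:
 *   ZIP1: d = { n[0], m[0], n[1], m[1], n[2], m[2], n[3], m[3] }
 *   UZP1: d = { n[0], n[2], n[4], n[6], m[0], m[2], m[4], m[6] }
 *   TRN1: d = { n[0], m[0], n[2], m[2], n[4], m[4], n[6], m[6] }
 */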
5741 int opcode = extract32(insn, 12, 2);
5742 bool part = extract32(insn, 14, 1);
5743 bool is_q = extract32(insn, 30, 1);
5744 int esize = 8 << size;
5746 int datasize = is_q ? 128 : 64;
5747 int elements = datasize / esize;
5748 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
5750 if (opcode == 0 || (size == 3 && !is_q)) {
5751 unallocated_encoding(s);
5755 if (!fp_access_check(s)) {
5759 tcg_resl = tcg_const_i64(0);
5760 tcg_resh = tcg_const_i64(0);
5761 tcg_res = tcg_temp_new_i64();
5763 for (i = 0; i < elements; i++) {
5765 case 1: /* UZP1/2 */
5767 int midpoint = elements / 2;
5769 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
5771 read_vec_element(s, tcg_res, rm,
5772 2 * (i - midpoint) + part, size);
5776 case 2: /* TRN1/2 */
5778 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
5780 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
5783 case 3: /* ZIP1/2 */
5785 int base = part * elements / 2;
5787 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
5789 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
5794 g_assert_not_reached();
5799 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
5800 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
5802 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
5803 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
5807 tcg_temp_free_i64(tcg_res);
5809 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5810 tcg_temp_free_i64(tcg_resl);
5811 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5812 tcg_temp_free_i64(tcg_resh);
5816 * do_reduction_op helper
5818 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
5819 * important for correct NaN propagation that we do these
5820 * operations in exactly the order specified by the pseudocode.
 * This is a recursive function; TCG temps should be freed by the
 * calling function once it is done with the values.
 */
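/* For example, FMAXV of a 4S vector is evaluated as
 *   fmax(fmax(e0, e1), fmax(e2, e3))
 * rather than a left-to-right fold: vmap == 0b1111 splits into
 * vmap_lo == 0b0011 and vmap_hi == 0b1100 below.
 */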
5825 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
5826 int esize, int size, int vmap, TCGv_ptr fpst)
5828 if (esize == size) {
5830 TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
5833 /* We should have one register left here */
5834 assert(ctpop8(vmap) == 1);
5835 element = ctz32(vmap);
5836 assert(element < 8);
5838 tcg_elem = tcg_temp_new_i32();
5839 read_vec_element_i32(s, tcg_elem, rn, element, msize);
5842 int bits = size / 2;
5843 int shift = ctpop8(vmap) / 2;
5844 int vmap_lo = (vmap >> shift) & vmap;
5845 int vmap_hi = (vmap & ~vmap_lo);
5846 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
5848 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
5849 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
5850 tcg_res = tcg_temp_new_i32();
5853 case 0x0c: /* fmaxnmv half-precision */
5854 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
5856 case 0x0f: /* fmaxv half-precision */
5857 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
5859 case 0x1c: /* fminnmv half-precision */
5860 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
5862 case 0x1f: /* fminv half-precision */
5863 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
5865 case 0x2c: /* fmaxnmv */
5866 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
5868 case 0x2f: /* fmaxv */
5869 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
5871 case 0x3c: /* fminnmv */
5872 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
5874 case 0x3f: /* fminv */
5875 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
5878 g_assert_not_reached();
5881 tcg_temp_free_i32(tcg_hi);
5882 tcg_temp_free_i32(tcg_lo);
5887 /* AdvSIMD across lanes
5888 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
5889 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5890 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
5891 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
5893 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
5895 int rd = extract32(insn, 0, 5);
5896 int rn = extract32(insn, 5, 5);
5897 int size = extract32(insn, 22, 2);
5898 int opcode = extract32(insn, 12, 5);
5899 bool is_q = extract32(insn, 30, 1);
5900 bool is_u = extract32(insn, 29, 1);
5902 bool is_min = false;
5906 TCGv_i64 tcg_res, tcg_elt;
5909 case 0x1b: /* ADDV */
5911 unallocated_encoding(s);
5915 case 0x3: /* SADDLV, UADDLV */
5916 case 0xa: /* SMAXV, UMAXV */
5917 case 0x1a: /* SMINV, UMINV */
5918 if (size == 3 || (size == 2 && !is_q)) {
5919 unallocated_encoding(s);
5923 case 0xc: /* FMAXNMV, FMINNMV */
5924 case 0xf: /* FMAXV, FMINV */
5925 /* Bit 1 of size field encodes min vs max and the actual size
5926 * depends on the encoding of the U bit. If not set (and FP16
 * enabled) then we do half-precision float instead of single
 * precision.
 */
5930 is_min = extract32(size, 1, 1);
5932 if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5934 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
5935 unallocated_encoding(s);
5942 unallocated_encoding(s);
5946 if (!fp_access_check(s)) {
5951 elements = (is_q ? 128 : 64) / esize;
5953 tcg_res = tcg_temp_new_i64();
5954 tcg_elt = tcg_temp_new_i64();
5956 /* These instructions operate across all lanes of a vector
5957 * to produce a single result. We can guarantee that a 64
5958 * bit intermediate is sufficient:
5959 * + for [US]ADDLV the maximum element size is 32 bits, and
5960 * the result type is 64 bits
5961 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
5962 * same as the element size, which is 32 bits at most
5963 * For the integer operations we can choose to work at 64
5964 * or 32 bits and truncate at the end; for simplicity
5965 * we use 64 bits always. The floating point
5966 * ops do require 32 bit intermediates, though.
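/* A quick bound (ours): the worst case is UADDLV of a 4S vector, which
 * sums four 32-bit values, so the result is below 4 * 2^32 == 2^34 and
 * easily fits the 64-bit accumulator.
 */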
5969 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
5971 for (i = 1; i < elements; i++) {
5972 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
5975 case 0x03: /* SADDLV / UADDLV */
5976 case 0x1b: /* ADDV */
5977 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
5979 case 0x0a: /* SMAXV / UMAXV */
5980 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
5982 tcg_res, tcg_elt, tcg_res, tcg_elt);
5984 case 0x1a: /* SMINV / UMINV */
5985 tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
5987 tcg_res, tcg_elt, tcg_res, tcg_elt);
5991 g_assert_not_reached();
5996 /* Floating point vector reduction ops which work across 32
5997 * bit (single) or 16 bit (half-precision) intermediates.
5998 * Note that correct NaN propagation requires that we do these
5999 * operations in exactly the order specified by the pseudocode.
6001 TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
6002 int fpopcode = opcode | is_min << 4 | is_u << 5;
6003 int vmap = (1 << elements) - 1;
6004 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
6005 (is_q ? 128 : 64), vmap, fpst);
6006 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
6007 tcg_temp_free_i32(tcg_res32);
6008 tcg_temp_free_ptr(fpst);
6011 tcg_temp_free_i64(tcg_elt);
6013 /* Now truncate the result to the width required for the final output */
6014 if (opcode == 0x03) {
6015 /* SADDLV, UADDLV: result is 2*esize */
6021 tcg_gen_ext8u_i64(tcg_res, tcg_res);
6024 tcg_gen_ext16u_i64(tcg_res, tcg_res);
6027 tcg_gen_ext32u_i64(tcg_res, tcg_res);
6032 g_assert_not_reached();
6035 write_fp_dreg(s, rd, tcg_res);
6036 tcg_temp_free_i64(tcg_res);
6039 /* DUP (Element, Vector)
6041 * 31 30 29 21 20 16 15 10 9 5 4 0
6042 * +---+---+-------------------+--------+-------------+------+------+
6043 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
6044 * +---+---+-------------------+--------+-------------+------+------+
6046 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6048 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
6051 int size = ctz32(imm5);
6052 int index = imm5 >> (size + 1);
6054 if (size > 3 || (size == 3 && !is_q)) {
6055 unallocated_encoding(s);
6059 if (!fp_access_check(s)) {
6063 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
6064 vec_reg_offset(s, rn, index, size),
6065 is_q ? 16 : 8, vec_full_reg_size(s));
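/* Worked imm5 decode (illustrative): imm5 == 0b10100 gives
 * size == ctz32(0x14) == 2 (32-bit elements) and index == 0x14 >> 3 == 2,
 * i.e. DUP Vd.4S, Vn.S[2] in the Q form.
 */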
6068 /* DUP (element, scalar)
6069 * 31 21 20 16 15 10 9 5 4 0
6070 * +-----------------------+--------+-------------+------+------+
6071 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
6072 * +-----------------------+--------+-------------+------+------+
6074 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
6077 int size = ctz32(imm5);
6082 unallocated_encoding(s);
6086 if (!fp_access_check(s)) {
6090 index = imm5 >> (size + 1);
6092 /* This instruction just extracts the specified element and
6093 * zero-extends it into the bottom of the destination register.
6095 tmp = tcg_temp_new_i64();
6096 read_vec_element(s, tmp, rn, index, size);
6097 write_fp_dreg(s, rd, tmp);
6098 tcg_temp_free_i64(tmp);
/* DUP (General)
 * 31 30 29 21 20 16 15 10 9 5 4 0
6104 * +---+---+-------------------+--------+-------------+------+------+
6105 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
6106 * +---+---+-------------------+--------+-------------+------+------+
6108 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6110 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
6113 int size = ctz32(imm5);
6114 uint32_t dofs, oprsz, maxsz;
6116 if (size > 3 || ((size == 3) && !is_q)) {
6117 unallocated_encoding(s);
6121 if (!fp_access_check(s)) {
6125 dofs = vec_full_reg_offset(s, rd);
6126 oprsz = is_q ? 16 : 8;
6127 maxsz = vec_full_reg_size(s);
6129 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
/* INS (Element)
 * 31 21 20 16 15 14 11 10 9 5 4 0
6135 * +-----------------------+--------+------------+---+------+------+
6136 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6137 * +-----------------------+--------+------------+---+------+------+
6139 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6140 * index: encoded in imm5<4:size+1>
6142 static void handle_simd_inse(DisasContext *s, int rd, int rn,
6145 int size = ctz32(imm5);
6146 int src_index, dst_index;
6150 unallocated_encoding(s);
6154 if (!fp_access_check(s)) {
6158 dst_index = extract32(imm5, 1+size, 5);
6159 src_index = extract32(imm4, size, 4);
6161 tmp = tcg_temp_new_i64();
6163 read_vec_element(s, tmp, rn, src_index, size);
6164 write_vec_element(s, tmp, rd, dst_index, size);
6166 tcg_temp_free_i64(tmp);
6172 * 31 21 20 16 15 10 9 5 4 0
6173 * +-----------------------+--------+-------------+------+------+
6174 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
6175 * +-----------------------+--------+-------------+------+------+
6177 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6178 * index: encoded in imm5<4:size+1>
6180 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6182 int size = ctz32(imm5);
6186 unallocated_encoding(s);
6190 if (!fp_access_check(s)) {
6194 idx = extract32(imm5, 1 + size, 4 - size);
6195 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6202 * 31 30 29 21 20 16 15 12 10 9 5 4 0
6203 * +---+---+-------------------+--------+-------------+------+------+
6204 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
6205 * +---+---+-------------------+--------+-------------+------+------+
6207 * U: unsigned when set
6208 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6210 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6211 int rn, int rd, int imm5)
6213 int size = ctz32(imm5);
6217 /* Check for UnallocatedEncodings */
6219 if (size > 2 || (size == 2 && !is_q)) {
6220 unallocated_encoding(s);
6225 || (size < 3 && is_q)
6226 || (size == 3 && !is_q)) {
6227 unallocated_encoding(s);
6232 if (!fp_access_check(s)) {
6236 element = extract32(imm5, 1+size, 4);
6238 tcg_rd = cpu_reg(s, rd);
6239 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6240 if (is_signed && !is_q) {
6241 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6246 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6247 * +---+---+----+-----------------+------+---+------+---+------+------+
6248 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6249 * +---+---+----+-----------------+------+---+------+---+------+------+
6251 static void disas_simd_copy(DisasContext *s, uint32_t insn)
6253 int rd = extract32(insn, 0, 5);
6254 int rn = extract32(insn, 5, 5);
6255 int imm4 = extract32(insn, 11, 4);
6256 int op = extract32(insn, 29, 1);
6257 int is_q = extract32(insn, 30, 1);
6258 int imm5 = extract32(insn, 16, 5);
6263 handle_simd_inse(s, rd, rn, imm4, imm5);
6265 unallocated_encoding(s);
6270 /* DUP (element - vector) */
6271 handle_simd_dupe(s, is_q, rd, rn, imm5);
6275 handle_simd_dupg(s, is_q, rd, rn, imm5);
6280 handle_simd_insg(s, rd, rn, imm5);
6282 unallocated_encoding(s);
6287 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
6288 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6291 unallocated_encoding(s);
6297 /* AdvSIMD modified immediate
6298 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
6299 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6300 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
6301 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6303 * There are a number of operations that can be carried out here:
6304 * MOVI - move (shifted) imm into register
6305 * MVNI - move inverted (shifted) imm into register
6306 * ORR - bitwise OR of (shifted) imm with register
6307 * BIC - bitwise clear of (shifted) imm with register
6308 * With ARMv8.2 we also have:
6309 * FMOV half-precision
6311 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
6313 int rd = extract32(insn, 0, 5);
6314 int cmode = extract32(insn, 12, 4);
6315 int cmode_3_1 = extract32(cmode, 1, 3);
6316 int cmode_0 = extract32(cmode, 0, 1);
6317 int o2 = extract32(insn, 11, 1);
6318 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
6319 bool is_neg = extract32(insn, 29, 1);
6320 bool is_q = extract32(insn, 30, 1);
6323 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
6324 /* Check for FMOV (vector, immediate) - half-precision */
6325 if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
6326 unallocated_encoding(s);
6331 if (!fp_access_check(s)) {
6335 /* See AdvSIMDExpandImm() in ARM ARM */
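/* e.g. (illustrative) cmode_3_1 == 1 with abcdefgh == 0xab selects
 * Replicate(Zeros(16):imm8:Zeros(8), 2), i.e.
 * bitfield_replicate(0xab << 8, 32) == 0x0000ab000000ab00.
 */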
6336 switch (cmode_3_1) {
6337 case 0: /* Replicate(Zeros(24):imm8, 2) */
6338 case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
6339 case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
6340 case 3: /* Replicate(imm8:Zeros(24), 2) */
6342 int shift = cmode_3_1 * 8;
6343 imm = bitfield_replicate(abcdefgh << shift, 32);
6346 case 4: /* Replicate(Zeros(8):imm8, 4) */
6347 case 5: /* Replicate(imm8:Zeros(8), 4) */
6349 int shift = (cmode_3_1 & 0x1) * 8;
6350 imm = bitfield_replicate(abcdefgh << shift, 16);
6355 /* Replicate(Zeros(8):imm8:Ones(16), 2) */
6356 imm = (abcdefgh << 16) | 0xffff;
6358 /* Replicate(Zeros(16):imm8:Ones(8), 2) */
6359 imm = (abcdefgh << 8) | 0xff;
6361 imm = bitfield_replicate(imm, 32);
6364 if (!cmode_0 && !is_neg) {
6365 imm = bitfield_replicate(abcdefgh, 8);
6366 } else if (!cmode_0 && is_neg) {
6369 for (i = 0; i < 8; i++) {
6370 if ((abcdefgh) & (1 << i)) {
6371 imm |= 0xffULL << (i * 8);
6374 } else if (cmode_0) {
6376 imm = (abcdefgh & 0x3f) << 48;
6377 if (abcdefgh & 0x80) {
6378 imm |= 0x8000000000000000ULL;
6380 if (abcdefgh & 0x40) {
6381 imm |= 0x3fc0000000000000ULL;
6383 imm |= 0x4000000000000000ULL;
6387 /* FMOV (vector, immediate) - half-precision */
6388 imm = vfp_expand_imm(MO_16, abcdefgh);
6389 /* now duplicate across the lanes */
6390 imm = bitfield_replicate(imm, 16);
6392 imm = (abcdefgh & 0x3f) << 19;
6393 if (abcdefgh & 0x80) {
6396 if (abcdefgh & 0x40) {
6407 fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
6408 g_assert_not_reached();
6411 if (cmode_3_1 != 7 && is_neg) {
6415 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
6416 /* MOVI or MVNI, with MVNI negation handled above. */
6417 tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
6418 vec_full_reg_size(s), imm);
6420 /* ORR or BIC, with BIC negation to AND handled above. */
6422 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
6424 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
6429 /* AdvSIMD scalar copy
6430 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6431 * +-----+----+-----------------+------+---+------+---+------+------+
6432 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6433 * +-----+----+-----------------+------+---+------+---+------+------+
6435 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
6437 int rd = extract32(insn, 0, 5);
6438 int rn = extract32(insn, 5, 5);
6439 int imm4 = extract32(insn, 11, 4);
6440 int imm5 = extract32(insn, 16, 5);
6441 int op = extract32(insn, 29, 1);
6443 if (op != 0 || imm4 != 0) {
6444 unallocated_encoding(s);
6448 /* DUP (element, scalar) */
6449 handle_simd_dupes(s, rd, rn, imm5);
6452 /* AdvSIMD scalar pairwise
6453 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6454 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6455 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
6456 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6458 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
6460 int u = extract32(insn, 29, 1);
6461 int size = extract32(insn, 22, 2);
6462 int opcode = extract32(insn, 12, 5);
6463 int rn = extract32(insn, 5, 5);
6464 int rd = extract32(insn, 0, 5);
6467 /* For some ops (the FP ones), size[1] is part of the encoding.
6468 * For ADDP strictly it is not but size[1] is always 1 for valid
6471 opcode |= (extract32(size, 1, 1) << 5);
6474 case 0x3b: /* ADDP */
6475 if (u || size != 3) {
6476 unallocated_encoding(s);
6479 if (!fp_access_check(s)) {
6485 case 0xc: /* FMAXNMP */
6486 case 0xd: /* FADDP */
6487 case 0xf: /* FMAXP */
6488 case 0x2c: /* FMINNMP */
6489 case 0x2f: /* FMINP */
/* FP op, size[0] is 32 or 64 bit */
6492 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6493 unallocated_encoding(s);
6499 size = extract32(size, 0, 1) ? MO_64 : MO_32;
6502 if (!fp_access_check(s)) {
6506 fpst = get_fpstatus_ptr(size == MO_16);
6509 unallocated_encoding(s);
6513 if (size == MO_64) {
6514 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6515 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6516 TCGv_i64 tcg_res = tcg_temp_new_i64();
6518 read_vec_element(s, tcg_op1, rn, 0, MO_64);
6519 read_vec_element(s, tcg_op2, rn, 1, MO_64);
6522 case 0x3b: /* ADDP */
6523 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
6525 case 0xc: /* FMAXNMP */
6526 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6528 case 0xd: /* FADDP */
6529 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6531 case 0xf: /* FMAXP */
6532 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6534 case 0x2c: /* FMINNMP */
6535 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6537 case 0x2f: /* FMINP */
6538 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6541 g_assert_not_reached();
6544 write_fp_dreg(s, rd, tcg_res);
6546 tcg_temp_free_i64(tcg_op1);
6547 tcg_temp_free_i64(tcg_op2);
6548 tcg_temp_free_i64(tcg_res);
6550 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6551 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6552 TCGv_i32 tcg_res = tcg_temp_new_i32();
6554 read_vec_element_i32(s, tcg_op1, rn, 0, size);
6555 read_vec_element_i32(s, tcg_op2, rn, 1, size);
6557 if (size == MO_16) {
6559 case 0xc: /* FMAXNMP */
6560 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6562 case 0xd: /* FADDP */
6563 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6565 case 0xf: /* FMAXP */
6566 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6568 case 0x2c: /* FMINNMP */
6569 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6571 case 0x2f: /* FMINP */
6572 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6575 g_assert_not_reached();
6579 case 0xc: /* FMAXNMP */
6580 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6582 case 0xd: /* FADDP */
6583 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6585 case 0xf: /* FMAXP */
6586 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6588 case 0x2c: /* FMINNMP */
6589 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6591 case 0x2f: /* FMINP */
6592 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6595 g_assert_not_reached();
6599 write_fp_sreg(s, rd, tcg_res);
6601 tcg_temp_free_i32(tcg_op1);
6602 tcg_temp_free_i32(tcg_op2);
6603 tcg_temp_free_i32(tcg_res);
6607 tcg_temp_free_ptr(fpst);
6612 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
 * This handles the common shift-right logic and is used by both
6615 * the vector and scalar code.
6617 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6618 TCGv_i64 tcg_rnd, bool accumulate,
6619 bool is_u, int size, int shift)
6621 bool extended_result = false;
6622 bool round = tcg_rnd != NULL;
6624 TCGv_i64 tcg_src_hi;
6626 if (round && size == 3) {
6627 extended_result = true;
6628 ext_lshift = 64 - shift;
6629 tcg_src_hi = tcg_temp_new_i64();
6630 } else if (shift == 64) {
6631 if (!accumulate && is_u) {
6632 /* result is zero */
6633 tcg_gen_movi_i64(tcg_res, 0);
6638 /* Deal with the rounding step */
6640 if (extended_result) {
6641 TCGv_i64 tcg_zero = tcg_const_i64(0);
6643 /* take care of sign extending tcg_res */
6644 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
6645 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6646 tcg_src, tcg_src_hi,
6649 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6653 tcg_temp_free_i64(tcg_zero);
6655 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
6659 /* Now do the shift right */
6660 if (round && extended_result) {
6661 /* extended case, >64 bit precision required */
6662 if (ext_lshift == 0) {
6663 /* special case, only high bits matter */
6664 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
6666 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6667 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
6668 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
6673 /* essentially shifting in 64 zeros */
6674 tcg_gen_movi_i64(tcg_src, 0);
6676 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6680 /* effectively extending the sign-bit */
6681 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
6683 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
6689 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
6691 tcg_gen_mov_i64(tcg_res, tcg_src);
6694 if (extended_result) {
6695 tcg_temp_free_i64(tcg_src_hi);
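/* Worked example (illustrative): SRSHR with shift == 2 on the value 7
 * adds the rounding constant 1 << (shift - 1) == 2 before shifting,
 * giving (7 + 2) >> 2 == 2, where plain SSHR would give 7 >> 2 == 1.
 */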
6699 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
6700 static void handle_scalar_simd_shri(DisasContext *s,
6701 bool is_u, int immh, int immb,
6702 int opcode, int rn, int rd)
6705 int immhb = immh << 3 | immb;
6706 int shift = 2 * (8 << size) - immhb;
6707 bool accumulate = false;
6709 bool insert = false;
6714 if (!extract32(immh, 3, 1)) {
6715 unallocated_encoding(s);
6719 if (!fp_access_check(s)) {
6724 case 0x02: /* SSRA / USRA (accumulate) */
6727 case 0x04: /* SRSHR / URSHR (rounding) */
6730 case 0x06: /* SRSRA / URSRA (accum + rounding) */
6731 accumulate = round = true;
6733 case 0x08: /* SRI */
6739 uint64_t round_const = 1ULL << (shift - 1);
6740 tcg_round = tcg_const_i64(round_const);
6745 tcg_rn = read_fp_dreg(s, rn);
6746 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6749 /* shift count same as element size is valid but does nothing;
6750 * special case to avoid potential shift by 64.
6752 int esize = 8 << size;
6753 if (shift != esize) {
6754 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
6755 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
6758 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6759 accumulate, is_u, size, shift);
6762 write_fp_dreg(s, rd, tcg_rd);
6764 tcg_temp_free_i64(tcg_rn);
6765 tcg_temp_free_i64(tcg_rd);
6767 tcg_temp_free_i64(tcg_round);
6771 /* SHL/SLI - Scalar shift left */
6772 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
6773 int immh, int immb, int opcode,
6776 int size = 32 - clz32(immh) - 1;
6777 int immhb = immh << 3 | immb;
6778 int shift = immhb - (8 << size);
6779 TCGv_i64 tcg_rn = new_tmp_a64(s);
6780 TCGv_i64 tcg_rd = new_tmp_a64(s);
6782 if (!extract32(immh, 3, 1)) {
6783 unallocated_encoding(s);
6787 if (!fp_access_check(s)) {
6791 tcg_rn = read_fp_dreg(s, rn);
6792 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
6795 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
6797 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
6800 write_fp_dreg(s, rd, tcg_rd);
6802 tcg_temp_free_i64(tcg_rn);
6803 tcg_temp_free_i64(tcg_rd);
6806 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
6807 * (signed/unsigned) narrowing */
6808 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
6809 bool is_u_shift, bool is_u_narrow,
6810 int immh, int immb, int opcode,
6813 int immhb = immh << 3 | immb;
6814 int size = 32 - clz32(immh) - 1;
6815 int esize = 8 << size;
6816 int shift = (2 * esize) - immhb;
6817 int elements = is_scalar ? 1 : (64 / esize);
6818 bool round = extract32(opcode, 0, 1);
6819 TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
6820 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
6821 TCGv_i32 tcg_rd_narrowed;
6824 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
6825 { gen_helper_neon_narrow_sat_s8,
6826 gen_helper_neon_unarrow_sat8 },
6827 { gen_helper_neon_narrow_sat_s16,
6828 gen_helper_neon_unarrow_sat16 },
6829 { gen_helper_neon_narrow_sat_s32,
6830 gen_helper_neon_unarrow_sat32 },
6833 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
6834 gen_helper_neon_narrow_sat_u8,
6835 gen_helper_neon_narrow_sat_u16,
6836 gen_helper_neon_narrow_sat_u32,
6839 NeonGenNarrowEnvFn *narrowfn;
6845 if (extract32(immh, 3, 1)) {
6846 unallocated_encoding(s);
6850 if (!fp_access_check(s)) {
6855 narrowfn = unsigned_narrow_fns[size];
6857 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
6860 tcg_rn = tcg_temp_new_i64();
6861 tcg_rd = tcg_temp_new_i64();
6862 tcg_rd_narrowed = tcg_temp_new_i32();
6863 tcg_final = tcg_const_i64(0);
6866 uint64_t round_const = 1ULL << (shift - 1);
6867 tcg_round = tcg_const_i64(round_const);
6872 for (i = 0; i < elements; i++) {
6873 read_vec_element(s, tcg_rn, rn, i, ldop);
6874 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6875 false, is_u_shift, size+1, shift);
6876 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
6877 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
6878 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
6882 write_vec_element(s, tcg_final, rd, 0, MO_64);
6884 write_vec_element(s, tcg_final, rd, 1, MO_64);
6888 tcg_temp_free_i64(tcg_round);
6890 tcg_temp_free_i64(tcg_rn);
6891 tcg_temp_free_i64(tcg_rd);
6892 tcg_temp_free_i32(tcg_rd_narrowed);
6893 tcg_temp_free_i64(tcg_final);
6895 clear_vec_high(s, is_q, rd);
6898 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
6899 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
6900 bool src_unsigned, bool dst_unsigned,
6901 int immh, int immb, int rn, int rd)
6903 int immhb = immh << 3 | immb;
6904 int size = 32 - clz32(immh) - 1;
6905 int shift = immhb - (8 << size);
6909 assert(!(scalar && is_q));
6912 if (!is_q && extract32(immh, 3, 1)) {
6913 unallocated_encoding(s);
6917 /* Since we use the variable-shift helpers we must
6918 * replicate the shift count into each element of
6919 * the tcg_shift value.
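/* e.g. (illustrative) for byte elements with shift == 3 this builds
 * 0x03030303, so every 8-bit lane seen by the variable-shift helper
 * carries the same count of 3.
 */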
6923 shift |= shift << 8;
6926 shift |= shift << 16;
6932 g_assert_not_reached();
6936 if (!fp_access_check(s)) {
6941 TCGv_i64 tcg_shift = tcg_const_i64(shift);
6942 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
6943 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
6944 { NULL, gen_helper_neon_qshl_u64 },
6946 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
6947 int maxpass = is_q ? 2 : 1;
6949 for (pass = 0; pass < maxpass; pass++) {
6950 TCGv_i64 tcg_op = tcg_temp_new_i64();
6952 read_vec_element(s, tcg_op, rn, pass, MO_64);
6953 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6954 write_vec_element(s, tcg_op, rd, pass, MO_64);
6956 tcg_temp_free_i64(tcg_op);
6958 tcg_temp_free_i64(tcg_shift);
6959 clear_vec_high(s, is_q, rd);
6961 TCGv_i32 tcg_shift = tcg_const_i32(shift);
6962 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
6964 { gen_helper_neon_qshl_s8,
6965 gen_helper_neon_qshl_s16,
6966 gen_helper_neon_qshl_s32 },
6967 { gen_helper_neon_qshlu_s8,
6968 gen_helper_neon_qshlu_s16,
6969 gen_helper_neon_qshlu_s32 }
6971 { NULL, NULL, NULL },
6972 { gen_helper_neon_qshl_u8,
6973 gen_helper_neon_qshl_u16,
6974 gen_helper_neon_qshl_u32 }
6977 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
6978 TCGMemOp memop = scalar ? size : MO_32;
6979 int maxpass = scalar ? 1 : is_q ? 4 : 2;
6981 for (pass = 0; pass < maxpass; pass++) {
6982 TCGv_i32 tcg_op = tcg_temp_new_i32();
6984 read_vec_element_i32(s, tcg_op, rn, pass, memop);
6985 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
6989 tcg_gen_ext8u_i32(tcg_op, tcg_op);
6992 tcg_gen_ext16u_i32(tcg_op, tcg_op);
6997 g_assert_not_reached();
6999 write_fp_sreg(s, rd, tcg_op);
7001 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
7004 tcg_temp_free_i32(tcg_op);
7006 tcg_temp_free_i32(tcg_shift);
7009 clear_vec_high(s, is_q, rd);
7014 /* Common vector code for handling integer to FP conversion */
7015 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
7016 int elements, int is_signed,
7017 int fracbits, int size)
7019 TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
7020 TCGv_i32 tcg_shift = NULL;
7022 TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
7025 if (fracbits || size == MO_64) {
7026 tcg_shift = tcg_const_i32(fracbits);
7029 if (size == MO_64) {
7030 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
7031 TCGv_i64 tcg_double = tcg_temp_new_i64();
7033 for (pass = 0; pass < elements; pass++) {
7034 read_vec_element(s, tcg_int64, rn, pass, mop);
7037 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
7038 tcg_shift, tcg_fpst);
7040 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
7041 tcg_shift, tcg_fpst);
7043 if (elements == 1) {
7044 write_fp_dreg(s, rd, tcg_double);
7046 write_vec_element(s, tcg_double, rd, pass, MO_64);
7050 tcg_temp_free_i64(tcg_int64);
7051 tcg_temp_free_i64(tcg_double);
7054 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
7055 TCGv_i32 tcg_float = tcg_temp_new_i32();
7057 for (pass = 0; pass < elements; pass++) {
7058 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
7064 gen_helper_vfp_sltos(tcg_float, tcg_int32,
7065 tcg_shift, tcg_fpst);
7067 gen_helper_vfp_ultos(tcg_float, tcg_int32,
7068 tcg_shift, tcg_fpst);
7072 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
7074 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
7081 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
7082 tcg_shift, tcg_fpst);
7084 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
7085 tcg_shift, tcg_fpst);
7089 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
7091 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
7096 g_assert_not_reached();
7099 if (elements == 1) {
7100 write_fp_sreg(s, rd, tcg_float);
7102 write_vec_element_i32(s, tcg_float, rd, pass, size);
7106 tcg_temp_free_i32(tcg_int32);
7107 tcg_temp_free_i32(tcg_float);
7110 tcg_temp_free_ptr(tcg_fpst);
7112 tcg_temp_free_i32(tcg_shift);
7115 clear_vec_high(s, elements << size == 16, rd);
/* UCVTF/SCVTF - Integer to FP conversion */
static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int opcode,
                                         int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int size = is_double ? MO_64 : MO_32;
    int elements;
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;

    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (is_scalar) {
        elements = 1;
    } else {
        elements = is_double ? 2 : is_q ? 4 : 2;
        if (is_double && !is_q) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* immh == 0 would be a failure of the decode logic */
    g_assert(immh);

    handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
}

/* FCVTZS, FCVTZU - FP to fixed-point conversion */
static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
                                         bool is_q, bool is_u,
                                         int immh, int immb, int rn, int rd)
{
    bool is_double = extract32(immh, 3, 1);
    int immhb = immh << 3 | immb;
    int fracbits = (is_double ? 128 : 64) - immhb;
    int pass;
    TCGv_ptr tcg_fpstatus;
    TCGv_i32 tcg_rmode, tcg_shift;

    if (!extract32(immh, 2, 2)) {
        unallocated_encoding(s);
        return;
    }

    if (!is_scalar && !is_q && is_double) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    assert(!(is_scalar && is_q));

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
    tcg_fpstatus = get_fpstatus_ptr(false);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_shift = tcg_const_i32(fracbits);
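    /* gen_helper_set_rmode installs the requested rounding mode and
     * returns the previous one in tcg_rmode, so repeating the call at
     * the end of the function restores the guest's original mode.
     */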

    if (is_double) {
        int maxpass = is_scalar ? 1 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (is_u) {
                gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            write_vec_element(s, tcg_op, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
        }
        clear_vec_high(s, is_q, rd);
    } else {
        int maxpass = is_scalar ? 1 : is_q ? 4 : 2;

        for (pass = 0; pass < maxpass; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
            if (is_u) {
                gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            } else {
                gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_op);
            } else {
                write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
            }
            tcg_temp_free_i32(tcg_op);
        }
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    /* Restore the previous rounding mode before freeing the fp status. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    tcg_temp_free_ptr(tcg_fpstatus);
    tcg_temp_free_i32(tcg_shift);
    tcg_temp_free_i32(tcg_rmode);
}

/* AdvSIMD scalar shift by immediate
 *  31 30 29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +-----+---+-------------+------+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-------------+------+------+--------+---+------+------+
 *
 * This is the scalar version, so it works on a fixed size register.
 */
static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);

    if (immh == 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA */
    case 0x04: /* SRSHR / URSHR */
    case 0x06: /* SRSRA / URSRA */
        handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF, UCVTF */
        handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0x10: /* SQSHRUN, SQSHRUN2 */
    case 0x11: /* SQRSHRUN, SQRSHRUN2 */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_vec_simd_sqshrn(s, true, false, false, true,
                               immh, immb, opcode, rn, rd);
        break;
    case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
    case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
        handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
                               immh, immb, opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS, FCVTZU */
        handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

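/* Decode example (illustrative): USHR D0, D1, #8 has U = 1, opcode = 0x00
 * and immh:immb = 1111:000, so immhb = 120 and the shift handlers compute
 * a right shift of 2 * 64 - 120 = 8.
 */
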
/* AdvSIMD scalar three different
 *  31 30 29 28       24 23  22  21 20  16 15    12 11 10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 0 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+-----+------+------+
 */
static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
{
    bool is_u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 4);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);

    if (is_u) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x9: /* SQDMLAL, SQDMLAL2 */
    case 0xb: /* SQDMLSL, SQDMLSL2 */
    case 0xd: /* SQDMULL, SQDMULL2 */
        if (size == 0 || size == 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (size == 2) {
        TCGv_i64 tcg_op1 = tcg_temp_new_i64();
        TCGv_i64 tcg_op2 = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
        read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);

        tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            tcg_gen_neg_i64(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
            read_vec_element(s, tcg_op1, rd, 0, MO_64);
            gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
                                              tcg_res, tcg_op1);
            break;
        default:
            g_assert_not_reached();
        }

        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op1 = tcg_temp_new_i32();
        TCGv_i32 tcg_op2 = tcg_temp_new_i32();
        TCGv_i64 tcg_res = tcg_temp_new_i64();

        read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
        read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

        gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
        gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);

        switch (opcode) {
        case 0xd: /* SQDMULL, SQDMULL2 */
            break;
        case 0xb: /* SQDMLSL, SQDMLSL2 */
            gen_helper_neon_negl_u32(tcg_res, tcg_res);
            /* fall through */
        case 0x9: /* SQDMLAL, SQDMLAL2 */
        {
            TCGv_i64 tcg_op3 = tcg_temp_new_i64();
            read_vec_element(s, tcg_op3, rd, 0, MO_32);
            gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
                                              tcg_res, tcg_op3);
            tcg_temp_free_i64(tcg_op3);
            break;
        }
        default:
            g_assert_not_reached();
        }

        tcg_gen_ext32u_i64(tcg_res, tcg_res);
        write_fp_dreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i64(tcg_res);
    }
}

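/* The saturation calls above matter because SQDMULL doubles the product:
 * the one overflowing input pair is INT_MIN squared, e.g. 0x8000 * 0x8000 =
 * 0x40000000, which doubled is 0x80000000 and saturates to 0x7fffffff
 * with QC set.
 */
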
/* CMTST : test is "if (X & Y != 0)". */
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_and_i32(d, a, b);
    tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i32(d, d);
}

static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_and_i64(d, a, b);
    tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
    tcg_gen_neg_i64(d, d);
}

static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_and_vec(vece, d, a, b);
    tcg_gen_dupi_vec(vece, a, 0);
    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
}

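/* setcond produces 0 or 1; the negation turns 1 into an all-ones value,
 * giving the -1 element mask the architecture specifies for a true
 * compare (e.g. 1 becomes 0xffffffffffffffff for 64-bit lanes).
 */
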
static void handle_3same_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
    /* Handle 64x64->64 opcodes which are shared between the scalar
     * and vector 3-same groups. We cover every opcode where size == 3
     * is valid in either the three-reg-same (integer, not pairwise)
     * or scalar-three-reg-same groups.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x1: /* SQADD */
        if (u) {
            gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x5: /* SQSUB */
        if (u) {
            gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x6: /* CMGT, CMHI */
        /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
         * We implement this using setcond (test) and then negating.
         */
        cond = u ? TCG_COND_GTU : TCG_COND_GT;
    do_cmop:
        tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x7: /* CMGE, CMHS */
        cond = u ? TCG_COND_GEU : TCG_COND_GE;
        goto do_cmop;
    case 0x11: /* CMTST, CMEQ */
        if (u) {
            cond = TCG_COND_EQ;
            goto do_cmop;
        }
        gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
        break;
    case 0x8: /* SSHL, USHL */
        if (u) {
            gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0x9: /* SQSHL, UQSHL */
        if (u) {
            gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0xa: /* SRSHL, URSHL */
        if (u) {
            gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    case 0xb: /* SQRSHL, UQRSHL */
        if (u) {
            gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        } else {
            gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
        }
        break;
    case 0x10: /* ADD, SUB */
        if (u) {
            tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
        } else {
            tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/* Handle the 3-same-operands float operations; shared by the scalar
 * and vector encodings. The caller must filter out any encodings
 * not allocated for the group it is dealing with.
 */
static void handle_3same_float(DisasContext *s, int size, int elements,
                               int fpopcode, int rd, int rn, int rm)
{
    int pass;
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    for (pass = 0; pass < elements; pass++) {
        if (size) {
            /* Double */
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_absd(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);

            tcg_temp_free_i64(tcg_res);
            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        } else {
            /* Single */
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);

            switch (fpopcode) {
            case 0x39: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negs(tcg_op1, tcg_op1);
                /* fall through */
            case 0x19: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
                gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
                                       tcg_res, fpst);
                break;
            case 0x1a: /* FADD */
                gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1b: /* FMULX */
                gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1c: /* FCMEQ */
                gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1e: /* FMAX */
                gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x1f: /* FRECPS */
                gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x18: /* FMAXNM */
                gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x38: /* FMINNM */
                gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3a: /* FSUB */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3e: /* FMIN */
                gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x3f: /* FRSQRTS */
                gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5b: /* FMUL */
                gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5c: /* FCMGE */
                gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5d: /* FACGE */
                gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x5f: /* FDIV */
                gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7a: /* FABD */
                gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
                gen_helper_vfp_abss(tcg_res, tcg_res);
                break;
            case 0x7c: /* FCMGT */
                gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            case 0x7d: /* FACGT */
                gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (elements == 1) {
                /* scalar single so clear high part */
                TCGv_i64 tcg_tmp = tcg_temp_new_i64();

                tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
                write_vec_element(s, tcg_tmp, rd, pass, MO_64);
                tcg_temp_free_i64(tcg_tmp);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);
        }
    }

    tcg_temp_free_ptr(fpst);

    clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
}

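/* Note that FABD has no helper of its own: it is emitted above as a
 * subtract followed by an absolute value, e.g. FABD(2.0, 5.0) =
 * |2.0 - 5.0| = 3.0.
 */
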
/* AdvSIMD scalar three same
 *  31 30 29 28       24 23  22 21 20  16 15    11  10 9    5 4    0
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 |  Rm  | opcode | 1 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+------+--------+---+------+------+
 */
static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int rm = extract32(insn, 16, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    TCGv_i64 tcg_rd;

    if (opcode >= 0x18) {
        /* Floating point: U, size[1] and opcode indicate operation */
        int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
        switch (fpopcode) {
        case 0x1b: /* FMULX */
        case 0x1f: /* FRECPS */
        case 0x3f: /* FRSQRTS */
        case 0x5d: /* FACGE */
        case 0x7d: /* FACGT */
        case 0x1c: /* FCMEQ */
        case 0x5c: /* FCMGE */
        case 0x7c: /* FCMGT */
        case 0x7a: /* FABD */
            break;
        default:
            unallocated_encoding(s);
            return;
        }

        if (!fp_access_check(s)) {
            return;
        }

        handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
        return;
    }

    switch (opcode) {
    case 0x1: /* SQADD, UQADD */
    case 0x5: /* SQSUB, UQSUB */
    case 0x9: /* SQSHL, UQSHL */
    case 0xb: /* SQRSHL, UQRSHL */
        break;
    case 0x8: /* SSHL, USHL */
    case 0xa: /* SRSHL, URSHL */
    case 0x6: /* CMGT, CMHI */
    case 0x7: /* CMGE, CMHS */
    case 0x11: /* CMTST, CMEQ */
    case 0x10: /* ADD, SUB (vector) */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x16: /* SQDMULH, SQRDMULH (vector) */
        if (size != 1 && size != 2) {
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd = tcg_temp_new_i64();

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rm = read_fp_dreg(s, rm);

        handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
        tcg_temp_free_i64(tcg_rn);
        tcg_temp_free_i64(tcg_rm);
    } else {
        /* Do a single operation on the lowest element in the vector.
         * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
         * no side effects for all these operations.
         * OPTME: special-purpose helpers would avoid doing some
         * unnecessary work in the helper for the 8 and 16 bit cases.
         */
        NeonGenTwoOpEnvFn *genenvfn;
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rm = tcg_temp_new_i32();
        TCGv_i32 tcg_rd32 = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);
        read_vec_element_i32(s, tcg_rm, rm, 0, size);

        switch (opcode) {
        case 0x1: /* SQADD, UQADD */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
                { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
                { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x5: /* SQSUB, UQSUB */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
                { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
                { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x9: /* SQSHL, UQSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
                { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
                { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0xb: /* SQRSHL, UQRSHL */
        {
            static NeonGenTwoOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
                { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
                { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* SQDMULH, SQRDMULH */
        {
            static NeonGenTwoOpEnvFn * const fns[2][2] = {
                { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
                { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
            };
            assert(size == 1 || size == 2);
            genenvfn = fns[size - 1][u];
            break;
        }
        default:
            g_assert_not_reached();
        }

        genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
        tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
        tcg_temp_free_i32(tcg_rd32);
        tcg_temp_free_i32(tcg_rn);
        tcg_temp_free_i32(tcg_rm);
    }

    write_fp_dreg(s, rd, tcg_rd);

    tcg_temp_free_i64(tcg_rd);
}

/* AdvSIMD scalar three same FP16
 *  31 30 29 28       24 23 22 21 20  16 15 14 13    11 10  9  5 4  0
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 |  Rm  | 0 0 | opcode | 1 | Rn | Rd |
 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
 */
static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
                                                  uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 3);
    int rm = extract32(insn, 16, 5);
    bool u = extract32(insn, 29, 1);
    bool a = extract32(insn, 23, 1);
    int fpopcode = opcode | (a << 3) | (u << 4);
    TCGv_ptr fpst;
    TCGv_i32 tcg_op1;
    TCGv_i32 tcg_op2;
    TCGv_i32 tcg_res;

    switch (fpopcode) {
    case 0x03: /* FMULX */
    case 0x04: /* FCMEQ (reg) */
    case 0x07: /* FRECPS */
    case 0x0f: /* FRSQRTS */
    case 0x14: /* FCMGE (reg) */
    case 0x15: /* FACGE */
    case 0x1a: /* FABD */
    case 0x1c: /* FCMGT (reg) */
    case 0x1d: /* FACGT */
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(true);

    tcg_op1 = tcg_temp_new_i32();
    tcg_op2 = tcg_temp_new_i32();
    tcg_res = tcg_temp_new_i32();

    read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
    read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);

    switch (fpopcode) {
    case 0x03: /* FMULX */
        gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x04: /* FCMEQ (reg) */
        gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x07: /* FRECPS */
        gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x0f: /* FRSQRTS */
        gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x14: /* FCMGE (reg) */
        gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x15: /* FACGE */
        gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1a: /* FABD */
        gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
        tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
        break;
    case 0x1c: /* FCMGT (reg) */
        gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    case 0x1d: /* FACGT */
        gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    write_fp_sreg(s, rd, tcg_res);

    tcg_temp_free_i32(tcg_res);
    tcg_temp_free_i32(tcg_op1);
    tcg_temp_free_i32(tcg_op2);
    tcg_temp_free_ptr(fpst);
}

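/* For the half-precision FABD above, masking with 0x7fff clears bit 15,
 * the fp16 sign bit: e.g. 0xc000 (-2.0) & 0x7fff = 0x4000 (+2.0).
 */
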
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
                            TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
                            TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
    /* Handle 64->64 opcodes which are shared between the scalar and
     * vector 2-reg-misc groups. We cover every integer opcode where size == 3
     * is valid in either group and also the double-precision fp ops.
     * The caller only needs to provide tcg_rmode and tcg_fpstatus if the op
     * requires them.
     */
    TCGCond cond;

    switch (opcode) {
    case 0x4: /* CLS, CLZ */
        if (u) {
            tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
        } else {
            tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
        }
        break;
    case 0x5: /* NOT */
        /* This opcode is shared with CNT and RBIT but we have earlier
         * enforced that size == 3 if and only if this is the NOT insn.
         */
        tcg_gen_not_i64(tcg_rd, tcg_rn);
        break;
    case 0x7: /* SQABS, SQNEG */
        if (u) {
            gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
        } else {
            gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
        }
        break;
    case 0xa: /* CMLT */
        /* 64 bit integer comparison against zero, result is
         * test ? (2^64 - 1) : 0. We implement via setcond(test)
         * and then negating.
         */
        cond = TCG_COND_LT;
    do_cmop:
        tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
        tcg_gen_neg_i64(tcg_rd, tcg_rd);
        break;
    case 0x8: /* CMGT, CMGE */
        cond = u ? TCG_COND_GE : TCG_COND_GT;
        goto do_cmop;
    case 0x9: /* CMEQ, CMLE */
        cond = u ? TCG_COND_LE : TCG_COND_EQ;
        goto do_cmop;
    case 0xb: /* ABS, NEG */
        if (u) {
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
        } else {
            TCGv_i64 tcg_zero = tcg_const_i64(0);
            tcg_gen_neg_i64(tcg_rd, tcg_rn);
            tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
                                tcg_rn, tcg_rd);
            tcg_temp_free_i64(tcg_zero);
        }
        break;
    case 0x2f: /* FABS */
        gen_helper_vfp_absd(tcg_rd, tcg_rn);
        break;
    case 0x6f: /* FNEG */
        gen_helper_vfp_negd(tcg_rd, tcg_rn);
        break;
    case 0x7f: /* FSQRT */
        gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
        break;
    case 0x1a: /* FCVTNS */
    case 0x1b: /* FCVTMS */
    case 0x1c: /* FCVTAS */
    case 0x3a: /* FCVTPS */
    case 0x3b: /* FCVTZS */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x5a: /* FCVTNU */
    case 0x5b: /* FCVTMU */
    case 0x5c: /* FCVTAU */
    case 0x7a: /* FCVTPU */
    case 0x7b: /* FCVTZU */
    {
        TCGv_i32 tcg_shift = tcg_const_i32(0);
        gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
        tcg_temp_free_i32(tcg_shift);
        break;
    }
    case 0x18: /* FRINTN */
    case 0x19: /* FRINTM */
    case 0x38: /* FRINTP */
    case 0x39: /* FRINTZ */
    case 0x58: /* FRINTA */
    case 0x79: /* FRINTI */
        gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    case 0x59: /* FRINTX */
        gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
        break;
    default:
        g_assert_not_reached();
    }
}

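/* The ABS expansion above relies on movcond: tcg_rd is first set to
 * -tcg_rn, then movcond(GT, tcg_rn, 0) selects tcg_rn when it is
 * positive, i.e. ABS(x) = x > 0 ? x : -x.
 */
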
static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
                                   bool is_scalar, bool is_u, bool is_q,
                                   int size, int rn, int rd)
{
    bool is_double = (size == MO_64);
    TCGv_ptr fpst;

    if (!fp_access_check(s)) {
        return;
    }

    fpst = get_fpstatus_ptr(size == MO_16);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        NeonGenTwoDoubleOPFn *genfn;
        bool swap = false;
        int pass;

        switch (opcode) {
        case 0x2e: /* FCMLT (zero) */
            swap = true;
            /* fall through */
        case 0x2c: /* FCMGT (zero) */
            genfn = gen_helper_neon_cgt_f64;
            break;
        case 0x2d: /* FCMEQ (zero) */
            genfn = gen_helper_neon_ceq_f64;
            break;
        case 0x6d: /* FCMLE (zero) */
            swap = true;
            /* fall through */
        case 0x6c: /* FCMGE (zero) */
            genfn = gen_helper_neon_cge_f64;
            break;
        default:
            g_assert_not_reached();
        }

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_zero);
        tcg_temp_free_i64(tcg_op);

        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_zero = tcg_const_i32(0);
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        NeonGenTwoSingleOPFn *genfn;
        bool swap = false;
        int pass, maxpasses;

        if (size == MO_16) {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_advsimd_cgt_f16;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_advsimd_ceq_f16;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_advsimd_cge_f16;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            switch (opcode) {
            case 0x2e: /* FCMLT (zero) */
                swap = true;
                /* fall through */
            case 0x2c: /* FCMGT (zero) */
                genfn = gen_helper_neon_cgt_f32;
                break;
            case 0x2d: /* FCMEQ (zero) */
                genfn = gen_helper_neon_ceq_f32;
                break;
            case 0x6d: /* FCMLE (zero) */
                swap = true;
                /* fall through */
            case 0x6c: /* FCMGE (zero) */
                genfn = gen_helper_neon_cge_f32;
                break;
            default:
                g_assert_not_reached();
            }
        }

        if (is_scalar) {
            maxpasses = 1;
        } else {
            int vector_size = 8 << is_q;
            maxpasses = vector_size >> size;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, size);
            if (swap) {
                genfn(tcg_res, tcg_zero, tcg_op, fpst);
            } else {
                genfn(tcg_res, tcg_op, tcg_zero, fpst);
            }
            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, size);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }

    tcg_temp_free_ptr(fpst);
}

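/* FCMLT (zero) and FCMLE (zero) have no helpers of their own: the
 * operands are swapped and the GT/GE helper does the work, since
 * x < 0 is the same test as 0 > x.
 */
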
static void handle_2misc_reciprocal(DisasContext *s, int opcode,
                                    bool is_scalar, bool is_u, bool is_q,
                                    int size, int rn, int rd)
{
    bool is_double = (size == 3);
    TCGv_ptr fpst = get_fpstatus_ptr(false);

    if (is_double) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        TCGv_i64 tcg_res = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
            switch (opcode) {
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }
            write_vec_element(s, tcg_res, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_op);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            read_vec_element_i32(s, tcg_op, rn, pass, MO_32);

            switch (opcode) {
            case 0x3c: /* URECPE */
                gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x3f: /* FRECPX */
                gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }
        }
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
        if (!is_scalar) {
            clear_vec_high(s, is_q, rd);
        }
    }
    tcg_temp_free_ptr(fpst);
}

static void handle_2misc_narrow(DisasContext *s, bool scalar,
                                int opcode, bool u, bool is_q,
                                int size, int rn, int rd)
{
    /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
     * in the source becomes a size element in the destination).
     */
    int pass;
    TCGv_i32 tcg_res[2];
    int destelt = is_q ? 2 : 0;
    int passes = scalar ? 1 : 2;

    if (scalar) {
        tcg_res[1] = tcg_const_i32(0);
    }

    for (pass = 0; pass < passes; pass++) {
        TCGv_i64 tcg_op = tcg_temp_new_i64();
        NeonGenNarrowFn *genfn = NULL;
        NeonGenNarrowEnvFn *genenvfn = NULL;

        if (scalar) {
            read_vec_element(s, tcg_op, rn, pass, size + 1);
        } else {
            read_vec_element(s, tcg_op, rn, pass, MO_64);
        }
        tcg_res[pass] = tcg_temp_new_i32();

        switch (opcode) {
        case 0x12: /* XTN, SQXTUN */
        {
            static NeonGenNarrowFn * const xtnfns[3] = {
                gen_helper_neon_narrow_u8,
                gen_helper_neon_narrow_u16,
                tcg_gen_extrl_i64_i32,
            };
            static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
                gen_helper_neon_unarrow_sat8,
                gen_helper_neon_unarrow_sat16,
                gen_helper_neon_unarrow_sat32,
            };
            if (u) {
                genenvfn = sqxtunfns[size];
            } else {
                genfn = xtnfns[size];
            }
            break;
        }
        case 0x14: /* SQXTN, UQXTN */
        {
            static NeonGenNarrowEnvFn * const fns[3][2] = {
                { gen_helper_neon_narrow_sat_s8,
                  gen_helper_neon_narrow_sat_u8 },
                { gen_helper_neon_narrow_sat_s16,
                  gen_helper_neon_narrow_sat_u16 },
                { gen_helper_neon_narrow_sat_s32,
                  gen_helper_neon_narrow_sat_u32 },
            };
            genenvfn = fns[size][u];
            break;
        }
        case 0x16: /* FCVTN, FCVTN2 */
            /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
            if (size == 2) {
                gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
            } else {
                TCGv_i32 tcg_lo = tcg_temp_new_i32();
                TCGv_i32 tcg_hi = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
                gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
                tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
                tcg_temp_free_i32(tcg_lo);
                tcg_temp_free_i32(tcg_hi);
            }
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            /* 64 bit to 32 bit float conversion
             * with von Neumann rounding (round to odd)
             */
            assert(size == 2);
            gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
            break;
        default:
            g_assert_not_reached();
        }

        if (genfn) {
            genfn(tcg_res[pass], tcg_op);
        } else if (genenvfn) {
            genenvfn(tcg_res[pass], cpu_env, tcg_op);
        }

        tcg_temp_free_i64(tcg_op);
    }

    for (pass = 0; pass < 2; pass++) {
        write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
        tcg_temp_free_i32(tcg_res[pass]);
    }
    clear_vec_high(s, is_q, rd);
}

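/* The "2" variants (SQXTN2, FCVTN2, ...) write only the high half of the
 * destination: destelt steers the results into 32-bit elements 2 and 3
 * when is_q is set. Results are buffered in tcg_res[] before the final
 * writeback so that rd == rn still reads correct inputs.
 */
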
/* Remaining saturating accumulating ops */
static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
                                bool is_q, int size, int rn, int rd)
{
    bool is_double = (size == 3);

    if (is_double) {
        TCGv_i64 tcg_rn = tcg_temp_new_i64();
        TCGv_i64 tcg_rd = tcg_temp_new_i64();
        int pass;

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            read_vec_element(s, tcg_rn, rn, pass, MO_64);
            read_vec_element(s, tcg_rd, rd, pass, MO_64);

            if (is_u) { /* USQADD */
                gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            } else { /* SUQADD */
                gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
            }
            write_vec_element(s, tcg_rd, rd, pass, MO_64);
        }
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
        clear_vec_high(s, !is_scalar, rd);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        for (pass = 0; pass < maxpasses; pass++) {
            if (is_scalar) {
                read_vec_element_i32(s, tcg_rn, rn, pass, size);
                read_vec_element_i32(s, tcg_rd, rd, pass, size);
            } else {
                read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
                read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
            }

            if (is_u) { /* USQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            } else { /* SUQADD */
                switch (size) {
                case 0:
                    gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 1:
                    gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                case 2:
                    gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            if (is_scalar) {
                TCGv_i64 tcg_zero = tcg_const_i64(0);
                write_vec_element(s, tcg_zero, rd, 0, MO_64);
                tcg_temp_free_i64(tcg_zero);
            }
            write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
        }
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
        clear_vec_high(s, is_q, rd);
    }
}

/* AdvSIMD scalar two reg misc
 *  31 30 29 28       24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+-----------+--------+-----+------+------+
 */
static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 12, 5);
    int size = extract32(insn, 22, 2);
    bool u = extract32(insn, 29, 1);
    bool is_fcvt = false;
    int rmode;
    TCGv_i32 tcg_rmode;
    TCGv_ptr tcg_fpstatus;

    switch (opcode) {
    case 0x3: /* USQADD / SUQADD*/
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_satacc(s, true, u, false, size, rn, rd);
        return;
    case 0x7: /* SQABS / SQNEG */
        break;
    case 0xa: /* CMLT */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x8: /* CMGT, CMGE */
    case 0x9: /* CMEQ, CMLE */
    case 0xb: /* ABS, NEG */
        if (size != 3) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x12: /* SQXTUN */
        if (!u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x14: /* SQXTN, UQXTN */
        if (size == 3) {
            unallocated_encoding(s);
            return;
        }
        if (!fp_access_check(s)) {
            return;
        }
        handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
        return;
    case 0xc ... 0xf:
    case 0x16 ... 0x1d:
    case 0x1f:
        /* Floating point: U, size[1] and opcode indicate operation;
         * size[0] indicates single or double precision.
         */
        opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
        size = extract32(size, 0, 1) ? 3 : 2;
        switch (opcode) {
        case 0x2c: /* FCMGT (zero) */
        case 0x2d: /* FCMEQ (zero) */
        case 0x2e: /* FCMLT (zero) */
        case 0x6c: /* FCMGE (zero) */
        case 0x6d: /* FCMLE (zero) */
            handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1d: /* SCVTF */
        case 0x5d: /* UCVTF */
        {
            bool is_signed = (opcode == 0x1d);
            if (!fp_access_check(s)) {
                return;
            }
            handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
            return;
        }
        case 0x3d: /* FRECPE */
        case 0x3f: /* FRECPX */
        case 0x7d: /* FRSQRTE */
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
            return;
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            is_fcvt = true;
            rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
            break;
        case 0x1c: /* FCVTAS */
        case 0x5c: /* FCVTAU */
            /* TIEAWAY doesn't fit in the usual rounding mode encoding */
            is_fcvt = true;
            rmode = FPROUNDING_TIEAWAY;
            break;
        case 0x56: /* FCVTXN, FCVTXN2 */
            if (size == 2) {
                unallocated_encoding(s);
                return;
            }
            if (!fp_access_check(s)) {
                return;
            }
            handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
            return;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (is_fcvt) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        tcg_fpstatus = get_fpstatus_ptr(false);
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    } else {
        tcg_rmode = NULL;
        tcg_fpstatus = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
        TCGv_i64 tcg_rd = tcg_temp_new_i64();

        handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
        write_fp_dreg(s, rd, tcg_rd);
        tcg_temp_free_i64(tcg_rd);
        tcg_temp_free_i64(tcg_rn);
    } else {
        TCGv_i32 tcg_rn = tcg_temp_new_i32();
        TCGv_i32 tcg_rd = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_rn, rn, 0, size);

        switch (opcode) {
        case 0x7: /* SQABS, SQNEG */
        {
            NeonGenOneOpEnvFn *genfn;
            static NeonGenOneOpEnvFn * const fns[3][2] = {
                { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
                { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
                { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
            };
            genfn = fns[size][u];
            genfn(tcg_rd, cpu_env, tcg_rn);
            break;
        }
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
        {
            TCGv_i32 tcg_shift = tcg_const_i32(0);
            gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
            tcg_temp_free_i32(tcg_shift);
            break;
        }
        default:
            g_assert_not_reached();
        }

        write_fp_sreg(s, rd, tcg_rd);
        tcg_temp_free_i32(tcg_rd);
        tcg_temp_free_i32(tcg_rn);
    }

    if (is_fcvt) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}

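/* For the FCVT group the rounding mode is encoded in the opcode:
 * rmode = opcode<5> | (opcode<0> << 1) maps the FCVTN, FCVTM, FCVTP and
 * FCVTZ forms onto FPROUNDING_TIEEVEN, NEGINF, POSINF and ZERO
 * respectively; only round-to-nearest-away (FCVTA) needs the special
 * case above.
 */
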
static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shri_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
}

static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
}

static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
    tcg_gen_shri_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}

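/* In the insert helpers above, the mask marks the bits that receive the
 * shifted-in data: for gen_shr8_ins_i64 with shift = 3,
 * dup_const(MO_8, 0xff >> 3) = 0x1f1f...1f, so each byte of d keeps its
 * top three bits and takes the other five from a >> 3.
 */
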
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i ssra_op[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_sari_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i usra_op[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };
    static const GVecGen2i sri_op[4] = {
        { .fni8 = gen_shr8_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_8 },
        { .fni8 = gen_shr16_ins_i64,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_16 },
        { .fni4 = gen_shr32_ins_i32,
          .fniv = gen_shr_ins_vec,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_32 },
        { .fni8 = gen_shr64_ins_i64,
          .fniv = gen_shr_ins_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opc = INDEX_op_shri_vec,
          .vece = MO_64 },
    };

    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = 2 * (8 << size) - immhb;
    bool accumulate = false;
    int dsize = is_q ? 128 : 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    TCGv_i64 tcg_round;
    uint64_t round_const;
    int i;

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    switch (opcode) {
    case 0x02: /* SSRA / USRA (accumulate) */
        if (is_u) {
            /* Shift count same as element size produces zero to add. */
            if (shift == 8 << size) {
                goto done;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
        } else {
            /* Shift count same as element size produces all sign to add. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
        }
        return;
    case 0x08: /* SRI */
        /* Shift count same as element size is valid but does nothing. */
        if (shift == 8 << size) {
            goto done;
        }
        gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
        return;

    case 0x00: /* SSHR / USHR */
        if (is_u) {
            if (shift == 8 << size) {
                /* Shift count the same size as element size produces zero. */
                tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
                                   is_q ? 16 : 8, vec_full_reg_size(s), 0);
            } else {
                gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
            }
        } else {
            /* Shift count the same size as element size produces all sign. */
            if (shift == 8 << size) {
                shift -= 1;
            }
            gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
        }
        return;

    case 0x04: /* SRSHR / URSHR (rounding) */
        break;
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        accumulate = true;
        break;
    default:
        g_assert_not_reached();
    }

    round_const = 1ULL << (shift - 1);
    tcg_round = tcg_const_i64(round_const);

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, memop);
        if (accumulate) {
            read_vec_element(s, tcg_rd, rd, i, memop);
        }

        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                accumulate, is_u, size, shift);

        write_vec_element(s, tcg_rd, rd, i, size);
    }
    tcg_temp_free_i64(tcg_round);

 done:
    clear_vec_high(s, is_q, rd);
}

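/* The rounding variants add 1 << (shift - 1) before shifting, i.e. they
 * round half upwards: URSHR #2 of 7 yields (7 + 2) >> 2 = 2 where a
 * plain USHR #2 yields 1.
 */
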
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_8, 0xff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    uint64_t mask = dup_const(MO_16, 0xffff << shift);
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_shli_i64(t, a, shift);
    tcg_gen_andi_i64(t, t, mask);
    tcg_gen_andi_i64(d, d, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
}

static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
}

static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    uint64_t mask = (1ull << sh) - 1;
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    TCGv_vec m = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, m, mask);
    tcg_gen_shli_vec(vece, t, a, sh);
    tcg_gen_and_vec(vece, d, d, m);
    tcg_gen_or_vec(vece, d, d, t);

    tcg_temp_free_vec(t);
    tcg_temp_free_vec(m);
}

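/* SLI must leave the low 'shift' bits of each destination element
 * intact: the 32 and 64 bit cases above express this directly as a
 * deposit of a into d at offset 'shift', while the narrower cases build
 * an equivalent mask with dup_const.
 */
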
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    static const GVecGen2i shi_op[4] = {
        { .fni8 = gen_shl8_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_8 },
        { .fni8 = gen_shl16_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_16 },
        { .fni4 = gen_shl32_ins_i32,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_32 },
        { .fni8 = gen_shl64_ins_i64,
          .fniv = gen_shl_ins_vec,
          .opc = INDEX_op_shli_vec,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .vece = MO_64 },
    };
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);

    if (extract32(immh, 3, 1) && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (size > 3 && !is_q) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (insert) {
        gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
    } else {
        gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
    }
}

/* SSHLL/USHLL - Vector shift left with widening */
static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int size = 32 - clz32(immh) - 1;
    int immhb = immh << 3 | immb;
    int shift = immhb - (8 << size);
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    TCGv_i64 tcg_rn = new_tmp_a64(s);
    TCGv_i64 tcg_rd = new_tmp_a64(s);
    int i;

    if (size >= 3) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    /* For the LL variants the store is larger than the load,
     * so if rd == rn we would overwrite parts of our input.
     * So load everything right now and use shifts in the main loop.
     */
    read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);

    for (i = 0; i < elements; i++) {
        tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
        ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
        tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
        write_vec_element(s, tcg_rd, rd, i, size + 1);
    }
}

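/* SSHLL/USHLL with a shift of zero are the SXTL/UXTL aliases; the
 * ext_and_shift_reg call performs the widening, with size | (!is_u << 2)
 * selecting the signed extension variants when is_u is clear.
 */
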
/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
                                 int immh, int immb, int opcode, int rn, int rd)
{
    int immhb = immh << 3 | immb;
    int size = 32 - clz32(immh) - 1;
    int dsize = 64;
    int esize = 8 << size;
    int elements = dsize/esize;
    int shift = (2 * esize) - immhb;
    bool round = extract32(opcode, 0, 1);
    TCGv_i64 tcg_rn, tcg_rd, tcg_final;
    TCGv_i64 tcg_round;
    int i;

    if (extract32(immh, 3, 1)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rn = tcg_temp_new_i64();
    tcg_rd = tcg_temp_new_i64();
    tcg_final = tcg_temp_new_i64();
    read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);

    if (round) {
        uint64_t round_const = 1ULL << (shift - 1);
        tcg_round = tcg_const_i64(round_const);
    } else {
        tcg_round = NULL;
    }

    for (i = 0; i < elements; i++) {
        read_vec_element(s, tcg_rn, rn, i, size+1);
        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
                                false, true, size+1, shift);

        tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
    }

    if (!is_q) {
        write_vec_element(s, tcg_final, rd, 0, MO_64);
    } else {
        write_vec_element(s, tcg_final, rd, 1, MO_64);
    }
    if (round) {
        tcg_temp_free_i64(tcg_round);
    }
    tcg_temp_free_i64(tcg_rn);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_final);

    clear_vec_high(s, is_q, rd);
}

/* AdvSIMD shift by immediate
 *  31  30   29 28         23 22  19 18  16 15    11  10 9    5 4    0
 * +---+---+---+-------------+------+------+--------+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 |  Rn  |  Rd  |
 * +---+---+---+-------------+------+------+--------+---+------+------+
 */
static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int opcode = extract32(insn, 11, 5);
    int immb = extract32(insn, 16, 3);
    int immh = extract32(insn, 19, 4);
    bool is_u = extract32(insn, 29, 1);
    bool is_q = extract32(insn, 30, 1);

    switch (opcode) {
    case 0x08: /* SRI */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x00: /* SSHR / USHR */
    case 0x02: /* SSRA / USRA (accumulate) */
    case 0x04: /* SRSHR / URSHR (rounding) */
    case 0x06: /* SRSRA / URSRA (accum + rounding) */
        handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x0a: /* SHL / SLI */
        handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x10: /* SHRN */
    case 0x11: /* RSHRN / SQRSHRUN */
        if (is_u) {
            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
                                   opcode, rn, rd);
        } else {
            handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
        }
        break;
    case 0x12: /* SQSHRN / UQSHRN */
    case 0x13: /* SQRSHRN / UQRSHRN */
        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
                               opcode, rn, rd);
        break;
    case 0x14: /* SSHLL / USHLL */
        handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
        break;
    case 0x1c: /* SCVTF / UCVTF */
        handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                     opcode, rn, rd);
        break;
    case 0xc: /* SQSHLU */
        if (!is_u) {
            unallocated_encoding(s);
            return;
        }
        handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
        break;
    case 0xe: /* SQSHL, UQSHL */
        handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
        break;
    case 0x1f: /* FCVTZS / FCVTZU */
        handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
        break;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* Generate code to do a "long" addition or subtraction, ie one done in
 * TCGv_i64 on vector lanes twice the width specified by size.
 */
static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
                          TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
{
    static NeonGenTwo64OpFn * const fns[3][2] = {
        { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
        { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
        { tcg_gen_add_i64, tcg_gen_sub_i64 },
    };
    NeonGenTwo64OpFn *genfn;
    assert(size < 3);

    genfn = fns[size][is_sub];
    genfn(tcg_res, tcg_op1, tcg_op2);
}

static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
                                int opcode, int rd, int rn, int rm)
{
    /* 3-reg-different widening insns: 64 x 64 -> 128 */
    TCGv_i64 tcg_res[2];
    int pass, accop;

    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    /* Does this op do an adding accumulate, a subtracting accumulate,
     * or no accumulate at all?
     */
    switch (opcode) {
    case 5:
    case 8:
    case 9:
        accop = 1;
        break;
    case 10:
    case 11:
        accop = -1;
        break;
    default:
        accop = 0;
        break;
    }

    if (accop != 0) {
        read_vec_element(s, tcg_res[0], rd, 0, MO_64);
        read_vec_element(s, tcg_res[1], rd, 1, MO_64);
    }

    /* size == 2 means two 32x32->64 operations; this is worth special
     * casing because we can generally handle it inline.
     */
    if (size == 2) {
        for (pass = 0; pass < 2; pass++) {
            TCGv_i64 tcg_op1 = tcg_temp_new_i64();
            TCGv_i64 tcg_op2 = tcg_temp_new_i64();
            TCGv_i64 tcg_passres;
            TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);

            int elt = pass + is_q * 2;

            read_vec_element(s, tcg_op1, rn, elt, memop);
            read_vec_element(s, tcg_op2, rm, elt, memop);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
                tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
                tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
            {
                TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
                TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();

                tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
                tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
                tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
                                    tcg_passres,
                                    tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
                tcg_temp_free_i64(tcg_tmp1);
                tcg_temp_free_i64(tcg_tmp2);
                break;
            }
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            default:
                g_assert_not_reached();
            }

            if (opcode == 9 || opcode == 11) {
                /* saturating accumulate ops */
                if (accop < 0) {
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                }
                gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                  tcg_res[pass], tcg_passres);
            } else if (accop > 0) {
                tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            } else if (accop < 0) {
                tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
            }

            if (accop != 0) {
                tcg_temp_free_i64(tcg_passres);
            }

            tcg_temp_free_i64(tcg_op1);
            tcg_temp_free_i64(tcg_op2);
        }
    } else {
        /* size 0 or 1, generally helper functions */
        for (pass = 0; pass < 2; pass++) {
            TCGv_i32 tcg_op1 = tcg_temp_new_i32();
            TCGv_i32 tcg_op2 = tcg_temp_new_i32();
            TCGv_i64 tcg_passres;
            int elt = pass + is_q * 2;

            read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
            read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);

            if (accop == 0) {
                tcg_passres = tcg_res[pass];
            } else {
                tcg_passres = tcg_temp_new_i64();
            }

            switch (opcode) {
            case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
            case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
            {
                TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
                static NeonGenWidenFn * const widenfns[2][2] = {
                    { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
                    { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
                };
                NeonGenWidenFn *widenfn = widenfns[size][is_u];

                widenfn(tcg_op2_64, tcg_op2);
                widenfn(tcg_passres, tcg_op1);
                gen_neon_addl(size, (opcode == 2), tcg_passres,
                              tcg_passres, tcg_op2_64);
                tcg_temp_free_i64(tcg_op2_64);
                break;
            }
            case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
            case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
            case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
            case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
                if (size == 0) {
                    if (is_u) {
                        gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
                    }
                } else {
                    if (is_u) {
                        gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
                    } else {
                        gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                    }
                }
                break;
            case 9: /* SQDMLAL, SQDMLAL2 */
            case 11: /* SQDMLSL, SQDMLSL2 */
            case 13: /* SQDMULL, SQDMULL2 */
                assert(size == 1);
                gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
                gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                  tcg_passres, tcg_passres);
                break;
            case 14: /* PMULL */
                assert(size == 0);
                gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(tcg_op1);
            tcg_temp_free_i32(tcg_op2);

            if (accop != 0) {
                if (opcode == 9 || opcode == 11) {
                    /* saturating accumulate ops */
                    if (accop < 0) {
                        gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    }
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                } else {
                    gen_neon_addl(size, (accop < 0), tcg_res[pass],
                                  tcg_res[pass], tcg_passres);
                }
                tcg_temp_free_i64(tcg_passres);
            }
        }
    }

    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}

9489 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
9490 int opcode, int rd, int rn, int rm)
9492 TCGv_i64 tcg_res[2];
9493 int part = is_q ? 2 : 0;
9496 for (pass = 0; pass < 2; pass++) {
9497 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9498 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9499 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
9500 static NeonGenWidenFn * const widenfns[3][2] = {
9501 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
9502 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
9503 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
9505 NeonGenWidenFn *widenfn = widenfns[size][is_u];
9507 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9508 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
9509 widenfn(tcg_op2_wide, tcg_op2);
9510 tcg_temp_free_i32(tcg_op2);
9511 tcg_res[pass] = tcg_temp_new_i64();
9512 gen_neon_addl(size, (opcode == 3),
9513 tcg_res[pass], tcg_op1, tcg_op2_wide);
9514 tcg_temp_free_i64(tcg_op1);
9515 tcg_temp_free_i64(tcg_op2_wide);
9518 for (pass = 0; pass < 2; pass++) {
9519 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9520 tcg_temp_free_i64(tcg_res[pass]);
9524 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
9526 tcg_gen_addi_i64(in, in, 1U << 31);
9527 tcg_gen_extrh_i64_i32(res, in);
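/* Illustrative arithmetic for the rounding above (a sketch, not from
 * the architecture text): for a 32-bit narrow-high result the returned
 * element is
 *     (in + (1 << 31)) >> 32
 * i.e. half of the discarded low 32 bits is added in first, so the
 * high half is rounded to nearest rather than truncated.
 */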
9530 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
9531 int opcode, int rd, int rn, int rm)
9533 TCGv_i32 tcg_res[2];
9534 int part = is_q ? 2 : 0;
9537 for (pass = 0; pass < 2; pass++) {
9538 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9539 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9540 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
9541 static NeonGenNarrowFn * const narrowfns[3][2] = {
9542 { gen_helper_neon_narrow_high_u8,
9543 gen_helper_neon_narrow_round_high_u8 },
9544 { gen_helper_neon_narrow_high_u16,
9545 gen_helper_neon_narrow_round_high_u16 },
9546 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
9548 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
9550 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9551 read_vec_element(s, tcg_op2, rm, pass, MO_64);
9553 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
9555 tcg_temp_free_i64(tcg_op1);
9556 tcg_temp_free_i64(tcg_op2);
9558 tcg_res[pass] = tcg_temp_new_i32();
9559 gennarrow(tcg_res[pass], tcg_wideres);
9560 tcg_temp_free_i64(tcg_wideres);
9563 for (pass = 0; pass < 2; pass++) {
9564 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
9565 tcg_temp_free_i32(tcg_res[pass]);
9567 clear_vec_high(s, is_q, rd);
9570 static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
9572 /* PMULL of 64 x 64 -> 128 is an odd special case because it
9573 * is the only three-reg-diff instruction which produces a
9574 * 128-bit wide result from a single operation. However since
9575 * it's possible to calculate the two halves more or less
9576 * separately we just use two helper calls.
9578 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9579 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9580 TCGv_i64 tcg_res = tcg_temp_new_i64();
9582 read_vec_element(s, tcg_op1, rn, is_q, MO_64);
9583 read_vec_element(s, tcg_op2, rm, is_q, MO_64);
9584 gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
9585 write_vec_element(s, tcg_res, rd, 0, MO_64);
9586 gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
9587 write_vec_element(s, tcg_res, rd, 1, MO_64);
9589 tcg_temp_free_i64(tcg_op1);
9590 tcg_temp_free_i64(tcg_op2);
9591 tcg_temp_free_i64(tcg_res);
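/* Background note: PMULL is a carry-less (polynomial, GF(2)) multiply,
 * so partial products combine with XOR instead of ADD. A small worked
 * example with 4-bit values:
 *     0b0110 * 0b0011 = (0b0110 << 1) ^ (0b0110 << 0) = 0b1010
 * whereas the integer product would be 0b10010. This is why the
 * 64 x 64 -> 128 form needs dedicated helpers rather than the ordinary
 * multiplier.
 */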
9594 /* AdvSIMD three different
9595 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
9596 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9597 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
9598 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9600 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
9602 /* Instructions in this group fall into three basic classes
9603 * (in each case with the operation working on each element in
9604 * the input vectors):
* (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
*     source operand)
9607 * (2) wide 64 x 128 -> 128
9608 * (3) narrowing 128 x 128 -> 64
9609 * Here we do initial decode, catch unallocated cases and
9610 * dispatch to separate functions for each class.
9612 int is_q = extract32(insn, 30, 1);
9613 int is_u = extract32(insn, 29, 1);
9614 int size = extract32(insn, 22, 2);
9615 int opcode = extract32(insn, 12, 4);
9616 int rm = extract32(insn, 16, 5);
9617 int rn = extract32(insn, 5, 5);
9618 int rd = extract32(insn, 0, 5);
switch (opcode) {
case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
9622 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
9623 /* 64 x 128 -> 128 */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
9631 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
9633 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
9634 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
9635 /* 128 x 128 -> 64 */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
9643 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
9645 case 14: /* PMULL, PMULL2 */
9646 if (is_u || size == 1 || size == 2) {
unallocated_encoding(s);
return;
}
if (size == 3) {
9651 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
9652 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
handle_pmull_64(s, is_q, rd, rn, rm);
return;
}
goto is_widening;
9662 case 9: /* SQDMLAL, SQDMLAL2 */
9663 case 11: /* SQDMLSL, SQDMLSL2 */
9664 case 13: /* SQDMULL, SQDMULL2 */
9665 if (is_u || size == 0) {
unallocated_encoding(s);
return;
}
/* fall through */
9670 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
9671 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
9672 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
9673 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
9674 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
9675 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
9676 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
9677 /* 64 x 64 -> 128 */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
is_widening:
if (!fp_access_check(s)) {
    return;
}
9687 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
default:
    /* opcode 15 not allocated */
    unallocated_encoding(s);
    break;
}
}
9696 static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9698 tcg_gen_xor_i64(rn, rn, rm);
9699 tcg_gen_and_i64(rn, rn, rd);
9700 tcg_gen_xor_i64(rd, rm, rn);
9703 static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9705 tcg_gen_xor_i64(rn, rn, rd);
9706 tcg_gen_and_i64(rn, rn, rm);
9707 tcg_gen_xor_i64(rd, rd, rn);
9710 static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
9712 tcg_gen_xor_i64(rn, rn, rd);
9713 tcg_gen_andc_i64(rn, rn, rm);
9714 tcg_gen_xor_i64(rd, rd, rn);
9717 static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9719 tcg_gen_xor_vec(vece, rn, rn, rm);
9720 tcg_gen_and_vec(vece, rn, rn, rd);
9721 tcg_gen_xor_vec(vece, rd, rm, rn);
9724 static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9726 tcg_gen_xor_vec(vece, rn, rn, rd);
9727 tcg_gen_and_vec(vece, rn, rn, rm);
9728 tcg_gen_xor_vec(vece, rd, rd, rn);
9731 static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
9733 tcg_gen_xor_vec(vece, rn, rn, rd);
9734 tcg_gen_andc_vec(vece, rn, rn, rm);
9735 tcg_gen_xor_vec(vece, rd, rd, rn);
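/* All six expanders above use the same xor/and/xor formulation of a
 * bitwise multiplex. Writing sel(mask, a, b) = (a & mask) | (b & ~mask),
 * the identities being relied on are:
 *     BSL: rd = sel(rd, rn, rm)  == rm ^ ((rn ^ rm) & rd)
 *     BIT: rd = sel(rm, rn, rd)  == rd ^ ((rn ^ rd) & rm)
 *     BIF: rd = sel(~rm, rn, rd) == rd ^ ((rn ^ rd) & ~rm)
 * (standard branch-free identities, noted here for clarity).
 */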
9738 /* Logic op (opcode == 3) subgroup of C3.6.16. */
9739 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
9741 static const GVecGen3 bsl_op = {
9742 .fni8 = gen_bsl_i64,
9743 .fniv = gen_bsl_vec,
9744 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9747 static const GVecGen3 bit_op = {
9748 .fni8 = gen_bit_i64,
9749 .fniv = gen_bit_vec,
9750 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9753 static const GVecGen3 bif_op = {
9754 .fni8 = gen_bif_i64,
9755 .fniv = gen_bif_vec,
9756 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9760 int rd = extract32(insn, 0, 5);
9761 int rn = extract32(insn, 5, 5);
9762 int rm = extract32(insn, 16, 5);
9763 int size = extract32(insn, 22, 2);
9764 bool is_u = extract32(insn, 29, 1);
9765 bool is_q = extract32(insn, 30, 1);
if (!fp_access_check(s)) {
    return;
}
9771 switch (size + 4 * is_u) {
case 0: /* AND */
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
    return;
case 1: /* BIC */
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
    return;
case 2: /* ORR */
    if (rn == rm) { /* MOV */
        gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
    } else {
        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
    }
    return;
case 3: /* ORN */
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
    return;
case 4: /* EOR */
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
    return;
9792 case 5: /* BSL bitwise select */
9793 gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
9795 case 6: /* BIT, bitwise insert if true */
9796 gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
9798 case 7: /* BIF, bitwise insert if false */
9799 gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
9803 g_assert_not_reached();
9807 /* Helper functions for 32 bit comparisons */
9808 static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9810 tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
9813 static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9815 tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
9818 static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9820 tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
9823 static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
9825 tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
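/* Each of these expands to a single movcond, e.g. gen_max_s32 computes
 *     res = (op1 >= op2) ? op1 : op2
 * without any branch in the generated code.
 */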
9828 /* Pairwise op subgroup of C3.6.16.
9830 * This is called directly or via the handle_3same_float for float pairwise
9831 * operations where the opcode and size are calculated differently.
9833 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
9834 int size, int rn, int rm, int rd)
9839 /* Floating point operations need fpst */
9840 if (opcode >= 0x58) {
fpst = get_fpstatus_ptr(false);
} else {
    fpst = NULL;
}
if (!fp_access_check(s)) {
    return;
}
9850 /* These operations work on the concatenated rm:rn, with each pair of
9851 * adjacent elements being operated on to produce an element in the result.
if (size == 3) {
    TCGv_i64 tcg_res[2];
9856 for (pass = 0; pass < 2; pass++) {
9857 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9858 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9859 int passreg = (pass == 0) ? rn : rm;
9861 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
9862 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
9863 tcg_res[pass] = tcg_temp_new_i64();
switch (opcode) {
case 0x17: /* ADDP */
9867 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
9869 case 0x58: /* FMAXNMP */
9870 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9872 case 0x5a: /* FADDP */
9873 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9875 case 0x5e: /* FMAXP */
9876 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9878 case 0x78: /* FMINNMP */
9879 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9881 case 0x7e: /* FMINP */
9882 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9885 g_assert_not_reached();
9888 tcg_temp_free_i64(tcg_op1);
9889 tcg_temp_free_i64(tcg_op2);
9892 for (pass = 0; pass < 2; pass++) {
9893 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9894 tcg_temp_free_i64(tcg_res[pass]);
} else {
    int maxpass = is_q ? 4 : 2;
9898 TCGv_i32 tcg_res[4];
9900 for (pass = 0; pass < maxpass; pass++) {
9901 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9902 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9903 NeonGenTwoOpFn *genfn = NULL;
9904 int passreg = pass < (maxpass / 2) ? rn : rm;
9905 int passelt = (is_q && (pass & 1)) ? 2 : 0;
9907 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
9908 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
9909 tcg_res[pass] = tcg_temp_new_i32();
switch (opcode) {
case 0x17: /* ADDP */
9914 static NeonGenTwoOpFn * const fns[3] = {
9915 gen_helper_neon_padd_u8,
gen_helper_neon_padd_u16,
tcg_gen_add_i32,
};
genfn = fns[size];
9922 case 0x14: /* SMAXP, UMAXP */
9924 static NeonGenTwoOpFn * const fns[3][2] = {
9925 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
9926 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
9927 { gen_max_s32, gen_max_u32 },
9929 genfn = fns[size][u];
9932 case 0x15: /* SMINP, UMINP */
9934 static NeonGenTwoOpFn * const fns[3][2] = {
9935 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
9936 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
9937 { gen_min_s32, gen_min_u32 },
9939 genfn = fns[size][u];
9942 /* The FP operations are all on single floats (32 bit) */
9943 case 0x58: /* FMAXNMP */
9944 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9946 case 0x5a: /* FADDP */
9947 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9949 case 0x5e: /* FMAXP */
9950 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9952 case 0x78: /* FMINNMP */
9953 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9955 case 0x7e: /* FMINP */
9956 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
9959 g_assert_not_reached();
9962 /* FP ops called directly, otherwise call now */
if (genfn) {
    genfn(tcg_res[pass], tcg_op1, tcg_op2);
}
9967 tcg_temp_free_i32(tcg_op1);
9968 tcg_temp_free_i32(tcg_op2);
9971 for (pass = 0; pass < maxpass; pass++) {
9972 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
9973 tcg_temp_free_i32(tcg_res[pass]);
9975 clear_vec_high(s, is_q, rd);
if (fpst) {
    tcg_temp_free_ptr(fpst);
}
9983 /* Floating point op subgroup of C3.6.16. */
9984 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
9986 /* For floating point ops, the U, size[1] and opcode bits
* together indicate the operation. size[0] indicates single
* or double precision.
*/
9990 int fpopcode = extract32(insn, 11, 5)
9991 | (extract32(insn, 23, 1) << 5)
9992 | (extract32(insn, 29, 1) << 6);
9993 int is_q = extract32(insn, 30, 1);
9994 int size = extract32(insn, 22, 1);
9995 int rm = extract32(insn, 16, 5);
9996 int rn = extract32(insn, 5, 5);
9997 int rd = extract32(insn, 0, 5);
9999 int datasize = is_q ? 128 : 64;
10000 int esize = 32 << size;
10001 int elements = datasize / esize;
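/* For example, is_q == 1 with size == 0 gives four single-precision
 * elements; size == 1 gives two double-precision elements.
 */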
10003 if (size == 1 && !is_q) {
10004 unallocated_encoding(s);
10008 switch (fpopcode) {
10009 case 0x58: /* FMAXNMP */
10010 case 0x5a: /* FADDP */
10011 case 0x5e: /* FMAXP */
10012 case 0x78: /* FMINNMP */
10013 case 0x7e: /* FMINP */
10014 if (size && !is_q) {
10015 unallocated_encoding(s);
handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
                       rn, rm, rd);
return;
10021 case 0x1b: /* FMULX */
10022 case 0x1f: /* FRECPS */
10023 case 0x3f: /* FRSQRTS */
10024 case 0x5d: /* FACGE */
10025 case 0x7d: /* FACGT */
10026 case 0x19: /* FMLA */
10027 case 0x39: /* FMLS */
10028 case 0x18: /* FMAXNM */
10029 case 0x1a: /* FADD */
10030 case 0x1c: /* FCMEQ */
10031 case 0x1e: /* FMAX */
10032 case 0x38: /* FMINNM */
10033 case 0x3a: /* FSUB */
10034 case 0x3e: /* FMIN */
10035 case 0x5b: /* FMUL */
10036 case 0x5c: /* FCMGE */
10037 case 0x5f: /* FDIV */
10038 case 0x7a: /* FABD */
10039 case 0x7c: /* FCMGT */
if (!fp_access_check(s)) {
    return;
}
handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
return;
default:
    unallocated_encoding(s);
    return;
}
}
10052 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10054 gen_helper_neon_mul_u8(a, a, b);
10055 gen_helper_neon_add_u8(d, d, a);
10058 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10060 gen_helper_neon_mul_u16(a, a, b);
10061 gen_helper_neon_add_u16(d, d, a);
10064 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10066 tcg_gen_mul_i32(a, a, b);
10067 tcg_gen_add_i32(d, d, a);
10070 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10072 tcg_gen_mul_i64(a, a, b);
10073 tcg_gen_add_i64(d, d, a);
10076 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10078 tcg_gen_mul_vec(vece, a, a, b);
10079 tcg_gen_add_vec(vece, d, d, a);
10082 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10084 gen_helper_neon_mul_u8(a, a, b);
10085 gen_helper_neon_sub_u8(d, d, a);
10088 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10090 gen_helper_neon_mul_u16(a, a, b);
10091 gen_helper_neon_sub_u16(d, d, a);
10094 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10096 tcg_gen_mul_i32(a, a, b);
10097 tcg_gen_sub_i32(d, d, a);
10100 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10102 tcg_gen_mul_i64(a, a, b);
10103 tcg_gen_sub_i64(d, d, a);
10106 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10108 tcg_gen_mul_vec(vece, a, a, b);
10109 tcg_gen_sub_vec(vece, d, d, a);
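/* The mla/mls expanders above all follow the same destructive pattern:
 * the product is formed in the scratch operand and then accumulated
 * into the destination, i.e.
 *     a = a * b;  d = d +/- a;
 * which keeps the accumulator as a simple read-modify-write operand.
 */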
10112 /* Integer op subgroup of C3.6.16. */
10113 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
10115 static const GVecGen3 cmtst_op[4] = {
10116 { .fni4 = gen_helper_neon_tst_u8,
10117 .fniv = gen_cmtst_vec,
10119 { .fni4 = gen_helper_neon_tst_u16,
10120 .fniv = gen_cmtst_vec,
10122 { .fni4 = gen_cmtst_i32,
10123 .fniv = gen_cmtst_vec,
10125 { .fni8 = gen_cmtst_i64,
10126 .fniv = gen_cmtst_vec,
10127 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10130 static const GVecGen3 mla_op[4] = {
10131 { .fni4 = gen_mla8_i32,
10132 .fniv = gen_mla_vec,
10133 .opc = INDEX_op_mul_vec,
10136 { .fni4 = gen_mla16_i32,
10137 .fniv = gen_mla_vec,
10138 .opc = INDEX_op_mul_vec,
10141 { .fni4 = gen_mla32_i32,
10142 .fniv = gen_mla_vec,
10143 .opc = INDEX_op_mul_vec,
10146 { .fni8 = gen_mla64_i64,
10147 .fniv = gen_mla_vec,
10148 .opc = INDEX_op_mul_vec,
10149 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10153 static const GVecGen3 mls_op[4] = {
10154 { .fni4 = gen_mls8_i32,
10155 .fniv = gen_mls_vec,
10156 .opc = INDEX_op_mul_vec,
10159 { .fni4 = gen_mls16_i32,
10160 .fniv = gen_mls_vec,
10161 .opc = INDEX_op_mul_vec,
10164 { .fni4 = gen_mls32_i32,
10165 .fniv = gen_mls_vec,
10166 .opc = INDEX_op_mul_vec,
10169 { .fni8 = gen_mls64_i64,
10170 .fniv = gen_mls_vec,
10171 .opc = INDEX_op_mul_vec,
10172 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10177 int is_q = extract32(insn, 30, 1);
10178 int u = extract32(insn, 29, 1);
10179 int size = extract32(insn, 22, 2);
10180 int opcode = extract32(insn, 11, 5);
10181 int rm = extract32(insn, 16, 5);
10182 int rn = extract32(insn, 5, 5);
10183 int rd = extract32(insn, 0, 5);
switch (opcode) {
case 0x13: /* MUL, PMUL */
10189 if (u && size != 0) {
unallocated_encoding(s);
return;
}
/* fall through */
10194 case 0x0: /* SHADD, UHADD */
10195 case 0x2: /* SRHADD, URHADD */
10196 case 0x4: /* SHSUB, UHSUB */
10197 case 0xc: /* SMAX, UMAX */
10198 case 0xd: /* SMIN, UMIN */
10199 case 0xe: /* SABD, UABD */
10200 case 0xf: /* SABA, UABA */
10201 case 0x12: /* MLA, MLS */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
break;
10207 case 0x16: /* SQDMULH, SQRDMULH */
10208 if (size == 0 || size == 3) {
10209 unallocated_encoding(s);
10214 if (size == 3 && !is_q) {
10215 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}

switch (opcode) {
10226 case 0x10: /* ADD, SUB */
if (u) {
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
} else {
    gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
}
return;
10233 case 0x13: /* MUL, PMUL */
10234 if (!u) { /* MUL */
gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
return;
}
break;
10239 case 0x12: /* MLA, MLS */
if (u) {
    gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
} else {
    gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
}
return;
case 0x11: /* CMTST, CMEQ */
if (!u) { /* CMTST */
    gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
    return;
}
/* else CMEQ */
cond = TCG_COND_EQ;
goto do_gvec_cmp;
10254 case 0x06: /* CMGT, CMHI */
cond = u ? TCG_COND_GTU : TCG_COND_GT;
goto do_gvec_cmp;
10257 case 0x07: /* CMGE, CMHS */
10258 cond = u ? TCG_COND_GEU : TCG_COND_GE;
do_gvec_cmp:
    tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
10261 vec_full_reg_offset(s, rn),
10262 vec_full_reg_offset(s, rm),
10263 is_q ? 16 : 8, vec_full_reg_size(s));
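/* Note: tcg_gen_gvec_cmp writes an all-ones element where the
 * comparison holds and all-zeroes where it does not, which is exactly
 * the AArch64 CMxx result format, so no extra negation is needed here.
 */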
return;
}

if (size == 3) {
    for (pass = 0; pass < 2; pass++) {
10270 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10271 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10272 TCGv_i64 tcg_res = tcg_temp_new_i64();
10274 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10275 read_vec_element(s, tcg_op2, rm, pass, MO_64);
10277 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
10279 write_vec_element(s, tcg_res, rd, pass, MO_64);
10281 tcg_temp_free_i64(tcg_res);
10282 tcg_temp_free_i64(tcg_op1);
10283 tcg_temp_free_i64(tcg_op2);
} else {
    for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
10287 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10288 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10289 TCGv_i32 tcg_res = tcg_temp_new_i32();
10290 NeonGenTwoOpFn *genfn = NULL;
10291 NeonGenTwoOpEnvFn *genenvfn = NULL;
10293 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
10294 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
switch (opcode) {
case 0x0: /* SHADD, UHADD */
10299 static NeonGenTwoOpFn * const fns[3][2] = {
10300 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
10301 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
10302 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
10304 genfn = fns[size][u];
10307 case 0x1: /* SQADD, UQADD */
10309 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10310 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
10311 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
10312 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
10314 genenvfn = fns[size][u];
10317 case 0x2: /* SRHADD, URHADD */
10319 static NeonGenTwoOpFn * const fns[3][2] = {
10320 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
10321 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
10322 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
10324 genfn = fns[size][u];
10327 case 0x4: /* SHSUB, UHSUB */
10329 static NeonGenTwoOpFn * const fns[3][2] = {
10330 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
10331 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
10332 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
10334 genfn = fns[size][u];
10337 case 0x5: /* SQSUB, UQSUB */
10339 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10340 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
10341 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
10342 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
10344 genenvfn = fns[size][u];
10347 case 0x8: /* SSHL, USHL */
10349 static NeonGenTwoOpFn * const fns[3][2] = {
10350 { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
10351 { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
10352 { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
10354 genfn = fns[size][u];
10357 case 0x9: /* SQSHL, UQSHL */
10359 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10360 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
10361 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
10362 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
10364 genenvfn = fns[size][u];
10367 case 0xa: /* SRSHL, URSHL */
10369 static NeonGenTwoOpFn * const fns[3][2] = {
10370 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
10371 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
10372 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
10374 genfn = fns[size][u];
10377 case 0xb: /* SQRSHL, UQRSHL */
10379 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10380 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
10381 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
10382 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
10384 genenvfn = fns[size][u];
10387 case 0xc: /* SMAX, UMAX */
10389 static NeonGenTwoOpFn * const fns[3][2] = {
10390 { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
10391 { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
10392 { gen_max_s32, gen_max_u32 },
10394 genfn = fns[size][u];
10398 case 0xd: /* SMIN, UMIN */
10400 static NeonGenTwoOpFn * const fns[3][2] = {
10401 { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
10402 { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
10403 { gen_min_s32, gen_min_u32 },
10405 genfn = fns[size][u];
10408 case 0xe: /* SABD, UABD */
10409 case 0xf: /* SABA, UABA */
10411 static NeonGenTwoOpFn * const fns[3][2] = {
10412 { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
10413 { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
10414 { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
10416 genfn = fns[size][u];
10419 case 0x13: /* MUL, PMUL */
10420 assert(u); /* PMUL */
10422 genfn = gen_helper_neon_mul_p8;
10424 case 0x16: /* SQDMULH, SQRDMULH */
10426 static NeonGenTwoOpEnvFn * const fns[2][2] = {
10427 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
10428 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
10430 assert(size == 1 || size == 2);
10431 genenvfn = fns[size - 1][u];
10435 g_assert_not_reached();
10439 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
10441 genfn(tcg_res, tcg_op1, tcg_op2);
10444 if (opcode == 0xf) {
10445 /* SABA, UABA: accumulating ops */
10446 static NeonGenTwoOpFn * const fns[3] = {
10447 gen_helper_neon_add_u8,
gen_helper_neon_add_u16,
tcg_gen_add_i32,
};
10452 read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
10453 fns[size](tcg_res, tcg_op1, tcg_res);
10456 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10458 tcg_temp_free_i32(tcg_res);
10459 tcg_temp_free_i32(tcg_op1);
10460 tcg_temp_free_i32(tcg_op2);
10463 clear_vec_high(s, is_q, rd);
10466 /* AdvSIMD three same
10467 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
10468 * +---+---+---+-----------+------+---+------+--------+---+------+------+
10469 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
10470 * +---+---+---+-----------+------+---+------+--------+---+------+------+
10472 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
10474 int opcode = extract32(insn, 11, 5);
switch (opcode) {
case 0x3: /* logic ops */
10478 disas_simd_3same_logic(s, insn);
10480 case 0x17: /* ADDP */
10481 case 0x14: /* SMAXP, UMAXP */
10482 case 0x15: /* SMINP, UMINP */
10484 /* Pairwise operations */
10485 int is_q = extract32(insn, 30, 1);
10486 int u = extract32(insn, 29, 1);
10487 int size = extract32(insn, 22, 2);
10488 int rm = extract32(insn, 16, 5);
10489 int rn = extract32(insn, 5, 5);
10490 int rd = extract32(insn, 0, 5);
10491 if (opcode == 0x17) {
10492 if (u || (size == 3 && !is_q)) {
unallocated_encoding(s);
return;
}
} else {
    if (size == 3) {
        unallocated_encoding(s);
        return;
    }
}
10502 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
10505 case 0x18 ... 0x31:
10506 /* floating point ops, sz[1] and U are part of opcode */
10507 disas_simd_3same_float(s, insn);
10510 disas_simd_3same_int(s, insn);
10516 * Advanced SIMD three same (ARMv8.2 FP16 variants)
10518 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
10519 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
10520 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
10521 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
10523 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
10524 * (register), FACGE, FABD, FCMGT (register) and FACGT.
10527 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
10529 int opcode, fpopcode;
10530 int is_q, u, a, rm, rn, rd;
10531 int datasize, elements;
10534 bool pairwise = false;
10536 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
10537 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
10545 /* For these floating point ops, the U, a and opcode bits
10546 * together indicate the operation.
10548 opcode = extract32(insn, 11, 3);
10549 u = extract32(insn, 29, 1);
10550 a = extract32(insn, 23, 1);
10551 is_q = extract32(insn, 30, 1);
10552 rm = extract32(insn, 16, 5);
10553 rn = extract32(insn, 5, 5);
10554 rd = extract32(insn, 0, 5);
10556 fpopcode = opcode | (a << 3) | (u << 4);
10557 datasize = is_q ? 128 : 64;
10558 elements = datasize / 16;
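/* All elements here are half-precision: a 64-bit vector holds four
 * of them and a 128-bit vector eight.
 */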
10560 switch (fpopcode) {
10561 case 0x10: /* FMAXNMP */
10562 case 0x12: /* FADDP */
10563 case 0x16: /* FMAXP */
10564 case 0x18: /* FMINNMP */
case 0x1e: /* FMINP */
    pairwise = true;
    break;
}
10570 fpst = get_fpstatus_ptr(true);
if (pairwise) {
    int maxpass = is_q ? 8 : 4;
10574 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10575 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10576 TCGv_i32 tcg_res[8];
10578 for (pass = 0; pass < maxpass; pass++) {
10579 int passreg = pass < (maxpass / 2) ? rn : rm;
10580 int passelt = (pass << 1) & (maxpass - 1);
10582 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
10583 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
10584 tcg_res[pass] = tcg_temp_new_i32();
10586 switch (fpopcode) {
10587 case 0x10: /* FMAXNMP */
10588 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
10591 case 0x12: /* FADDP */
10592 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10594 case 0x16: /* FMAXP */
10595 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10597 case 0x18: /* FMINNMP */
10598 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
10601 case 0x1e: /* FMINP */
10602 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10605 g_assert_not_reached();
10609 for (pass = 0; pass < maxpass; pass++) {
10610 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
10611 tcg_temp_free_i32(tcg_res[pass]);
10614 tcg_temp_free_i32(tcg_op1);
10615 tcg_temp_free_i32(tcg_op2);
} else {
    for (pass = 0; pass < elements; pass++) {
10619 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10620 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10621 TCGv_i32 tcg_res = tcg_temp_new_i32();
10623 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
10624 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
10626 switch (fpopcode) {
10627 case 0x0: /* FMAXNM */
10628 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
10630 case 0x1: /* FMLA */
10631 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
10632 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
10635 case 0x2: /* FADD */
10636 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
10638 case 0x3: /* FMULX */
10639 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
10641 case 0x4: /* FCMEQ */
10642 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10644 case 0x6: /* FMAX */
10645 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
10647 case 0x7: /* FRECPS */
10648 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10650 case 0x8: /* FMINNM */
10651 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
10653 case 0x9: /* FMLS */
10654 /* As usual for ARM, separate negation for fused multiply-add */
10655 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
10656 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
10657 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
10660 case 0xa: /* FSUB */
10661 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
10663 case 0xe: /* FMIN */
10664 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
10666 case 0xf: /* FRSQRTS */
10667 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10669 case 0x13: /* FMUL */
10670 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
10672 case 0x14: /* FCMGE */
10673 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10675 case 0x15: /* FACGE */
10676 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10678 case 0x17: /* FDIV */
10679 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
10681 case 0x1a: /* FABD */
10682 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
10683 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
10685 case 0x1c: /* FCMGT */
10686 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
10688 case 0x1d: /* FACGT */
10689 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
default:
    fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
            __func__, insn, fpopcode, s->pc);
10694 g_assert_not_reached();
10697 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
10698 tcg_temp_free_i32(tcg_res);
10699 tcg_temp_free_i32(tcg_op1);
10700 tcg_temp_free_i32(tcg_op2);
10704 tcg_temp_free_ptr(fpst);
10706 clear_vec_high(s, is_q, rd);
10709 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
10710 int size, int rn, int rd)
10712 /* Handle 2-reg-misc ops which are widening (so each size element
* in the source becomes a 2*size element in the destination).
10714 * The only instruction like this is FCVTL.
if (size == 3) {
    /* 32 -> 64 bit fp conversion */
10720 TCGv_i64 tcg_res[2];
10721 int srcelt = is_q ? 2 : 0;
10723 for (pass = 0; pass < 2; pass++) {
10724 TCGv_i32 tcg_op = tcg_temp_new_i32();
10725 tcg_res[pass] = tcg_temp_new_i64();
10727 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
10728 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
10729 tcg_temp_free_i32(tcg_op);
10731 for (pass = 0; pass < 2; pass++) {
10732 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10733 tcg_temp_free_i64(tcg_res[pass]);
} else {
    /* 16 -> 32 bit fp conversion */
10737 int srcelt = is_q ? 4 : 0;
10738 TCGv_i32 tcg_res[4];
10740 for (pass = 0; pass < 4; pass++) {
10741 tcg_res[pass] = tcg_temp_new_i32();
10743 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
10744 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
10747 for (pass = 0; pass < 4; pass++) {
10748 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10749 tcg_temp_free_i32(tcg_res[pass]);
10754 static void handle_rev(DisasContext *s, int opcode, bool u,
10755 bool is_q, int size, int rn, int rd)
10757 int op = (opcode << 1) | u;
10758 int opsz = op + size;
10759 int grp_size = 3 - opsz;
10760 int dsize = is_q ? 128 : 64;
if (opsz >= 3) {
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
if (size == 0) {
    /* Special case bytes, use bswap op on each group of elements */
10774 int groups = dsize / (8 << grp_size);
10776 for (i = 0; i < groups; i++) {
10777 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
10779 read_vec_element(s, tcg_tmp, rn, i, grp_size);
10780 switch (grp_size) {
10782 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
10785 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
10788 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
10791 g_assert_not_reached();
10793 write_vec_element(s, tcg_tmp, rd, i, grp_size);
10794 tcg_temp_free_i64(tcg_tmp);
10796 clear_vec_high(s, is_q, rd);
} else {
    int revmask = (1 << grp_size) - 1;
10799 int esize = 8 << size;
10800 int elements = dsize / esize;
10801 TCGv_i64 tcg_rn = tcg_temp_new_i64();
10802 TCGv_i64 tcg_rd = tcg_const_i64(0);
10803 TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
10805 for (i = 0; i < elements; i++) {
10806 int e_rev = (i & 0xf) ^ revmask;
10807 int off = e_rev * esize;
10808 read_vec_element(s, tcg_rn, rn, i, size);
if (off >= 64) {
    tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
                        tcg_rn, off - 64, esize);
} else {
    tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
}
10816 write_vec_element(s, tcg_rd, rd, 0, MO_64);
10817 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
10819 tcg_temp_free_i64(tcg_rd_hi);
10820 tcg_temp_free_i64(tcg_rd);
10821 tcg_temp_free_i64(tcg_rn);
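/* Worked example of the deposit loop above: for REV32 with 16-bit
 * elements, revmask == 1, so e_rev = i ^ 1 and adjacent elements swap
 * in pairs, reversing the two halfwords inside each 32-bit group.
 */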
10825 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
10826 bool is_q, int size, int rn, int rd)
10828 /* Implement the pairwise operations from 2-misc:
10829 * SADDLP, UADDLP, SADALP, UADALP.
10830 * These all add pairs of elements in the input to produce a
10831 * double-width result element in the output (possibly accumulating).
10833 bool accum = (opcode == 0x6);
10834 int maxpass = is_q ? 2 : 1;
10836 TCGv_i64 tcg_res[2];
if (size == 2) {
    /* 32 + 32 -> 64 op */
10840 TCGMemOp memop = size + (u ? 0 : MO_SIGN);
10842 for (pass = 0; pass < maxpass; pass++) {
10843 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10844 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10846 tcg_res[pass] = tcg_temp_new_i64();
10848 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
10849 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
10850 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
if (accum) {
    read_vec_element(s, tcg_op1, rd, pass, MO_64);
    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
}
10856 tcg_temp_free_i64(tcg_op1);
10857 tcg_temp_free_i64(tcg_op2);
} else {
    for (pass = 0; pass < maxpass; pass++) {
10861 TCGv_i64 tcg_op = tcg_temp_new_i64();
10862 NeonGenOneOpFn *genfn;
10863 static NeonGenOneOpFn * const fns[2][2] = {
10864 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
10865 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
10868 genfn = fns[size][u];
10870 tcg_res[pass] = tcg_temp_new_i64();
10872 read_vec_element(s, tcg_op, rn, pass, MO_64);
10873 genfn(tcg_res[pass], tcg_op);
if (accum) {
    read_vec_element(s, tcg_op, rd, pass, MO_64);
    if (size == 0) {
        gen_helper_neon_addl_u16(tcg_res[pass],
                                 tcg_res[pass], tcg_op);
    } else {
        gen_helper_neon_addl_u32(tcg_res[pass],
                                 tcg_res[pass], tcg_op);
    }
}
10885 tcg_temp_free_i64(tcg_op);
if (!is_q) {
    tcg_res[1] = tcg_const_i64(0);
}
10891 for (pass = 0; pass < 2; pass++) {
10892 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10893 tcg_temp_free_i64(tcg_res[pass]);
10897 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
10899 /* Implement SHLL and SHLL2 */
10901 int part = is_q ? 2 : 0;
10902 TCGv_i64 tcg_res[2];
10904 for (pass = 0; pass < 2; pass++) {
10905 static NeonGenWidenFn * const widenfns[3] = {
10906 gen_helper_neon_widen_u8,
10907 gen_helper_neon_widen_u16,
10908 tcg_gen_extu_i32_i64,
10910 NeonGenWidenFn *widenfn = widenfns[size];
10911 TCGv_i32 tcg_op = tcg_temp_new_i32();
10913 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
10914 tcg_res[pass] = tcg_temp_new_i64();
10915 widenfn(tcg_res[pass], tcg_op);
10916 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
10918 tcg_temp_free_i32(tcg_op);
10921 for (pass = 0; pass < 2; pass++) {
10922 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10923 tcg_temp_free_i64(tcg_res[pass]);
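/* So SHLL is implemented as "widen, then shift left by the source
 * element width": e.g. for 8-bit sources each byte b becomes the
 * 16-bit value b << 8. The instruction has no shift-amount field;
 * the amount is always 8 << size, as generated above.
 */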
10927 /* AdvSIMD two reg misc
10928 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
10929 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
10930 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
10931 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
10933 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
10935 int size = extract32(insn, 22, 2);
10936 int opcode = extract32(insn, 12, 5);
10937 bool u = extract32(insn, 29, 1);
10938 bool is_q = extract32(insn, 30, 1);
10939 int rn = extract32(insn, 5, 5);
10940 int rd = extract32(insn, 0, 5);
10941 bool need_fpstatus = false;
10942 bool need_rmode = false;
10944 TCGv_i32 tcg_rmode;
10945 TCGv_ptr tcg_fpstatus;
switch (opcode) {
case 0x0: /* REV64, REV32 */
10949 case 0x1: /* REV16 */
10950 handle_rev(s, opcode, u, is_q, size, rn, rd);
10952 case 0x5: /* CNT, NOT, RBIT */
if (u && size == 0) {
    /* NOT */
    break;
} else if (u && size == 1) {
    /* RBIT */
    break;
} else if (!u && size == 0) {
    /* CNT */
    break;
}
unallocated_encoding(s);
return;
10965 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
10966 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
10975 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
10977 case 0x4: /* CLS, CLZ */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
break;
10983 case 0x2: /* SADDLP, UADDLP */
10984 case 0x6: /* SADALP, UADALP */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
10992 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
10994 case 0x13: /* SHLL, SHLL2 */
10995 if (u == 0 || size == 3) {
10996 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
11002 handle_shll(s, is_q, size, rn, rd);
11004 case 0xa: /* CMLT */
if (u == 1) {
    unallocated_encoding(s);
    return;
}
/* fall through */
11010 case 0x8: /* CMGT, CMGE */
11011 case 0x9: /* CMEQ, CMLE */
11012 case 0xb: /* ABS, NEG */
11013 if (size == 3 && !is_q) {
11014 unallocated_encoding(s);
11018 case 0x3: /* SUQADD, USQADD */
11019 if (size == 3 && !is_q) {
11020 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
11026 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
11028 case 0x7: /* SQABS, SQNEG */
11029 if (size == 3 && !is_q) {
11030 unallocated_encoding(s);
11035 case 0x16 ... 0x1d:
11038 /* Floating point: U, size[1] and opcode indicate operation;
* size[0] indicates single or double precision.
*/
11041 int is_double = extract32(size, 0, 1);
11042 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
11043 size = is_double ? 3 : 2;
switch (opcode) {
case 0x2f: /* FABS */
11046 case 0x6f: /* FNEG */
11047 if (size == 3 && !is_q) {
11048 unallocated_encoding(s);
11052 case 0x1d: /* SCVTF */
11053 case 0x5d: /* UCVTF */
bool is_signed = (opcode == 0x1d);
11056 int elements = is_double ? 2 : is_q ? 4 : 2;
11057 if (is_double && !is_q) {
11058 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
11064 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
11067 case 0x2c: /* FCMGT (zero) */
11068 case 0x2d: /* FCMEQ (zero) */
11069 case 0x2e: /* FCMLT (zero) */
11070 case 0x6c: /* FCMGE (zero) */
11071 case 0x6d: /* FCMLE (zero) */
11072 if (size == 3 && !is_q) {
11073 unallocated_encoding(s);
11076 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
11078 case 0x7f: /* FSQRT */
11079 if (size == 3 && !is_q) {
11080 unallocated_encoding(s);
11084 case 0x1a: /* FCVTNS */
11085 case 0x1b: /* FCVTMS */
11086 case 0x3a: /* FCVTPS */
11087 case 0x3b: /* FCVTZS */
11088 case 0x5a: /* FCVTNU */
11089 case 0x5b: /* FCVTMU */
11090 case 0x7a: /* FCVTPU */
11091 case 0x7b: /* FCVTZU */
11092 need_fpstatus = true;
11094 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11095 if (size == 3 && !is_q) {
11096 unallocated_encoding(s);
11100 case 0x5c: /* FCVTAU */
11101 case 0x1c: /* FCVTAS */
11102 need_fpstatus = true;
11104 rmode = FPROUNDING_TIEAWAY;
11105 if (size == 3 && !is_q) {
11106 unallocated_encoding(s);
11110 case 0x3c: /* URECPE */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
/* fall through */
11116 case 0x3d: /* FRECPE */
11117 case 0x7d: /* FRSQRTE */
11118 if (size == 3 && !is_q) {
11119 unallocated_encoding(s);
if (!fp_access_check(s)) {
    return;
}
11125 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
11127 case 0x56: /* FCVTXN, FCVTXN2 */
if (size == 2) {
    unallocated_encoding(s);
    return;
}
/* fall through */
11133 case 0x16: /* FCVTN, FCVTN2 */
11134 /* handle_2misc_narrow does a 2*size -> size operation, but these
11135 * instructions encode the source size rather than dest size.
if (!fp_access_check(s)) {
    return;
}
11140 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11142 case 0x17: /* FCVTL, FCVTL2 */
if (!fp_access_check(s)) {
    return;
}
11146 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
11148 case 0x18: /* FRINTN */
11149 case 0x19: /* FRINTM */
11150 case 0x38: /* FRINTP */
11151 case 0x39: /* FRINTZ */
11153 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11155 case 0x59: /* FRINTX */
11156 case 0x79: /* FRINTI */
11157 need_fpstatus = true;
11158 if (size == 3 && !is_q) {
11159 unallocated_encoding(s);
11163 case 0x58: /* FRINTA */
11165 rmode = FPROUNDING_TIEAWAY;
11166 need_fpstatus = true;
11167 if (size == 3 && !is_q) {
11168 unallocated_encoding(s);
11172 case 0x7c: /* URSQRTE */
if (size == 3) {
    unallocated_encoding(s);
    return;
}
11177 need_fpstatus = true;
default:
    unallocated_encoding(s);
    return;
}
break;
}
default:
    unallocated_encoding(s);
    return;
}
if (!fp_access_check(s)) {
    return;
}
11194 if (need_fpstatus || need_rmode) {
11195 tcg_fpstatus = get_fpstatus_ptr(false);
} else {
    tcg_fpstatus = NULL;
}
if (need_rmode) {
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
} else {
    tcg_rmode = NULL;
}
switch (opcode) {
case 0x5:
    if (u && size == 0) { /* NOT */
        gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
        return;
    }
    break;
case 0xb:
    if (u) { /* NEG */
        gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
        return;
    }
    break;
}
if (size == 3) {
    /* All 64-bit element operations can be shared with scalar 2misc */
11225 for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
11226 TCGv_i64 tcg_op = tcg_temp_new_i64();
11227 TCGv_i64 tcg_res = tcg_temp_new_i64();
11229 read_vec_element(s, tcg_op, rn, pass, MO_64);
11231 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
11232 tcg_rmode, tcg_fpstatus);
11234 write_vec_element(s, tcg_res, rd, pass, MO_64);
11236 tcg_temp_free_i64(tcg_res);
11237 tcg_temp_free_i64(tcg_op);
} else {
    for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11243 TCGv_i32 tcg_op = tcg_temp_new_i32();
11244 TCGv_i32 tcg_res = tcg_temp_new_i32();
11247 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
if (size == 2) {
    /* Special cases for 32 bit elements */
    switch (opcode) {
11252 case 0xa: /* CMLT */
11253 /* 32 bit integer comparison against zero, result is
* test ? (2^32 - 1) : 0. We implement via setcond(test)
* and then negating.
*/
11257 cond = TCG_COND_LT;
do_cmop:
    tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
11260 tcg_gen_neg_i32(tcg_res, tcg_res);
11262 case 0x8: /* CMGT, CMGE */
cond = u ? TCG_COND_GE : TCG_COND_GT;
goto do_cmop;
11265 case 0x9: /* CMEQ, CMLE */
cond = u ? TCG_COND_LE : TCG_COND_EQ;
goto do_cmop;
11268 case 0x4: /* CLS */
if (u) {
    tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
} else {
    tcg_gen_clrsb_i32(tcg_res, tcg_op);
}
break;
11275 case 0x7: /* SQABS, SQNEG */
if (u) {
    gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
} else {
    gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
}
break;
11282 case 0xb: /* ABS, NEG */
if (u) {
    tcg_gen_neg_i32(tcg_res, tcg_op);
} else {
    TCGv_i32 tcg_zero = tcg_const_i32(0);
11287 tcg_gen_neg_i32(tcg_res, tcg_op);
11288 tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
11289 tcg_zero, tcg_op, tcg_res);
11290 tcg_temp_free_i32(tcg_zero);
11293 case 0x2f: /* FABS */
11294 gen_helper_vfp_abss(tcg_res, tcg_op);
11296 case 0x6f: /* FNEG */
11297 gen_helper_vfp_negs(tcg_res, tcg_op);
11299 case 0x7f: /* FSQRT */
11300 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
11302 case 0x1a: /* FCVTNS */
11303 case 0x1b: /* FCVTMS */
11304 case 0x1c: /* FCVTAS */
11305 case 0x3a: /* FCVTPS */
11306 case 0x3b: /* FCVTZS */
11308 TCGv_i32 tcg_shift = tcg_const_i32(0);
11309 gen_helper_vfp_tosls(tcg_res, tcg_op,
11310 tcg_shift, tcg_fpstatus);
11311 tcg_temp_free_i32(tcg_shift);
11314 case 0x5a: /* FCVTNU */
11315 case 0x5b: /* FCVTMU */
11316 case 0x5c: /* FCVTAU */
11317 case 0x7a: /* FCVTPU */
11318 case 0x7b: /* FCVTZU */
11320 TCGv_i32 tcg_shift = tcg_const_i32(0);
11321 gen_helper_vfp_touls(tcg_res, tcg_op,
11322 tcg_shift, tcg_fpstatus);
11323 tcg_temp_free_i32(tcg_shift);
11326 case 0x18: /* FRINTN */
11327 case 0x19: /* FRINTM */
11328 case 0x38: /* FRINTP */
11329 case 0x39: /* FRINTZ */
11330 case 0x58: /* FRINTA */
11331 case 0x79: /* FRINTI */
11332 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
11334 case 0x59: /* FRINTX */
11335 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
11337 case 0x7c: /* URSQRTE */
11338 gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
11341 g_assert_not_reached();
} else {
    /* Use helpers for 8 and 16 bit elements */
    switch (opcode) {
11346 case 0x5: /* CNT, RBIT */
11347 /* For these two insns size is part of the opcode specifier
11348 * (handled earlier); they always operate on byte elements.
if (u) {
    gen_helper_neon_rbit_u8(tcg_res, tcg_op);
} else {
    gen_helper_neon_cnt_u8(tcg_res, tcg_op);
}
break;
11356 case 0x7: /* SQABS, SQNEG */
11358 NeonGenOneOpEnvFn *genfn;
11359 static NeonGenOneOpEnvFn * const fns[2][2] = {
11360 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
11361 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
11363 genfn = fns[size][u];
11364 genfn(tcg_res, cpu_env, tcg_op);
11367 case 0x8: /* CMGT, CMGE */
11368 case 0x9: /* CMEQ, CMLE */
11369 case 0xa: /* CMLT */
11371 static NeonGenTwoOpFn * const fns[3][2] = {
11372 { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
11373 { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
11374 { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
11376 NeonGenTwoOpFn *genfn;
11379 TCGv_i32 tcg_zero = tcg_const_i32(0);
11381 /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
11382 comp = (opcode - 0x8) * 2 + u;
11383 /* ...but LE, LT are implemented as reverse GE, GT */
11384 reverse = (comp > 2);
11388 genfn = fns[comp][size];
11390 genfn(tcg_res, tcg_zero, tcg_op);
11392 genfn(tcg_res, tcg_op, tcg_zero);
11394 tcg_temp_free_i32(tcg_zero);
11397 case 0xb: /* ABS, NEG */
if (u) {
    TCGv_i32 tcg_zero = tcg_const_i32(0);
if (size) {
    gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
} else {
    gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
}
11405 tcg_temp_free_i32(tcg_zero);
} else {
    if (size) {
        gen_helper_neon_abs_s16(tcg_res, tcg_op);
    } else {
        gen_helper_neon_abs_s8(tcg_res, tcg_op);
    }
}
break;
11414 case 0x4: /* CLS, CLZ */
if (u) {
    if (size == 0) {
        gen_helper_neon_clz_u8(tcg_res, tcg_op);
    } else {
        gen_helper_neon_clz_u16(tcg_res, tcg_op);
    }
} else {
    if (size == 0) {
        gen_helper_neon_cls_s8(tcg_res, tcg_op);
    } else {
        gen_helper_neon_cls_s16(tcg_res, tcg_op);
    }
}
break;
11430 g_assert_not_reached();
11434 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
11436 tcg_temp_free_i32(tcg_res);
11437 tcg_temp_free_i32(tcg_op);
11440 clear_vec_high(s, is_q, rd);
if (need_rmode) {
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
11444 tcg_temp_free_i32(tcg_rmode);
11446 if (need_fpstatus) {
11447 tcg_temp_free_ptr(tcg_fpstatus);
11451 /* AdvSIMD [scalar] two register miscellaneous (FP16)
11453 * 31 30 29 28 27 24 23 22 21 17 16 12 11 10 9 5 4 0
11454 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
11455 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 | Rn | Rd |
11456 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
11457 * mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
11458 * val: 0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
11460 * This actually covers two groups where scalar access is governed by
11461 * bit 28. A bunch of the instructions (float to integral) only exist
11462 * in the vector form and are un-allocated for the scalar decode. Also
11463 * in the scalar decode Q is always 1.
11465 static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
11467 int fpop, opcode, a, u;
11471 bool only_in_vector = false;
11474 TCGv_i32 tcg_rmode = NULL;
11475 TCGv_ptr tcg_fpstatus = NULL;
11476 bool need_rmode = false;
11477 bool need_fpst = true;
11480 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
11481 unallocated_encoding(s);
11485 rd = extract32(insn, 0, 5);
11486 rn = extract32(insn, 5, 5);
11488 a = extract32(insn, 23, 1);
11489 u = extract32(insn, 29, 1);
11490 is_scalar = extract32(insn, 28, 1);
11491 is_q = extract32(insn, 30, 1);
11493 opcode = extract32(insn, 12, 5);
11494 fpop = deposit32(opcode, 5, 1, a);
11495 fpop = deposit32(fpop, 6, 1, u);
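/* The two deposits build the combined opcode as
 *     fpop = opcode | (a << 5) | (u << 6)
 * mirroring how the non-FP16 decoder folds U and size[1] into the
 * opcode before switching on it.
 */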
switch (fpop) {
11501 case 0x1d: /* SCVTF */
11502 case 0x5d: /* UCVTF */
11509 elements = (is_q ? 8 : 4);
if (!fp_access_check(s)) {
    return;
}
11515 handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
11519 case 0x2c: /* FCMGT (zero) */
11520 case 0x2d: /* FCMEQ (zero) */
11521 case 0x2e: /* FCMLT (zero) */
11522 case 0x6c: /* FCMGE (zero) */
11523 case 0x6d: /* FCMLE (zero) */
11524 handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
11526 case 0x3d: /* FRECPE */
11527 case 0x3f: /* FRECPX */
11529 case 0x18: /* FRINTN */
11531 only_in_vector = true;
11532 rmode = FPROUNDING_TIEEVEN;
11534 case 0x19: /* FRINTM */
11536 only_in_vector = true;
11537 rmode = FPROUNDING_NEGINF;
11539 case 0x38: /* FRINTP */
11541 only_in_vector = true;
11542 rmode = FPROUNDING_POSINF;
11544 case 0x39: /* FRINTZ */
11546 only_in_vector = true;
11547 rmode = FPROUNDING_ZERO;
11549 case 0x58: /* FRINTA */
11551 only_in_vector = true;
11552 rmode = FPROUNDING_TIEAWAY;
11554 case 0x59: /* FRINTX */
11555 case 0x79: /* FRINTI */
11556 only_in_vector = true;
11557 /* current rounding mode */
11559 case 0x1a: /* FCVTNS */
11561 rmode = FPROUNDING_TIEEVEN;
11563 case 0x1b: /* FCVTMS */
11565 rmode = FPROUNDING_NEGINF;
11567 case 0x1c: /* FCVTAS */
11569 rmode = FPROUNDING_TIEAWAY;
11571 case 0x3a: /* FCVTPS */
11573 rmode = FPROUNDING_POSINF;
11575 case 0x3b: /* FCVTZS */
11577 rmode = FPROUNDING_ZERO;
11579 case 0x5a: /* FCVTNU */
11581 rmode = FPROUNDING_TIEEVEN;
11583 case 0x5b: /* FCVTMU */
11585 rmode = FPROUNDING_NEGINF;
11587 case 0x5c: /* FCVTAU */
11589 rmode = FPROUNDING_TIEAWAY;
11591 case 0x7a: /* FCVTPU */
11593 rmode = FPROUNDING_POSINF;
11595 case 0x7b: /* FCVTZU */
11597 rmode = FPROUNDING_ZERO;
11599 case 0x2f: /* FABS */
11600 case 0x6f: /* FNEG */
11603 case 0x7d: /* FRSQRTE */
11604 case 0x7f: /* FSQRT (vector) */
break;
default:
    fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
11608 g_assert_not_reached();
11612 /* Check additional constraints for the scalar encoding */
if (is_scalar) {
    if (!is_q) {
        unallocated_encoding(s);
        return;
    }
11618 /* FRINTxx is only in the vector form */
11619 if (only_in_vector) {
unallocated_encoding(s);
return;
}
}
if (!fp_access_check(s)) {
    return;
}
11629 if (need_rmode || need_fpst) {
11630 tcg_fpstatus = get_fpstatus_ptr(true);
}
if (need_rmode) {
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
11635 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
}
if (is_scalar) {
    TCGv_i32 tcg_op = tcg_temp_new_i32();
11640 TCGv_i32 tcg_res = tcg_temp_new_i32();
11642 read_vec_element_i32(s, tcg_op, rn, 0, MO_16);
switch (fpop) {
case 0x1a: /* FCVTNS */
11646 case 0x1b: /* FCVTMS */
11647 case 0x1c: /* FCVTAS */
11648 case 0x3a: /* FCVTPS */
11649 case 0x3b: /* FCVTZS */
11650 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
11652 case 0x3d: /* FRECPE */
11653 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
11655 case 0x3f: /* FRECPX */
11656 gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
11658 case 0x5a: /* FCVTNU */
11659 case 0x5b: /* FCVTMU */
11660 case 0x5c: /* FCVTAU */
11661 case 0x7a: /* FCVTPU */
11662 case 0x7b: /* FCVTZU */
11663 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
11665 case 0x6f: /* FNEG */
11666 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
11668 case 0x7d: /* FRSQRTE */
11669 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
11672 g_assert_not_reached();
11675 /* limit any sign extension going on */
11676 tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
11677 write_fp_sreg(s, rd, tcg_res);
11679 tcg_temp_free_i32(tcg_res);
11680 tcg_temp_free_i32(tcg_op);
} else {
    for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
11683 TCGv_i32 tcg_op = tcg_temp_new_i32();
11684 TCGv_i32 tcg_res = tcg_temp_new_i32();
11686 read_vec_element_i32(s, tcg_op, rn, pass, MO_16);
switch (fpop) {
case 0x1a: /* FCVTNS */
11690 case 0x1b: /* FCVTMS */
11691 case 0x1c: /* FCVTAS */
11692 case 0x3a: /* FCVTPS */
11693 case 0x3b: /* FCVTZS */
11694 gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
11696 case 0x3d: /* FRECPE */
11697 gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
11699 case 0x5a: /* FCVTNU */
11700 case 0x5b: /* FCVTMU */
11701 case 0x5c: /* FCVTAU */
11702 case 0x7a: /* FCVTPU */
11703 case 0x7b: /* FCVTZU */
11704 gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
11706 case 0x18: /* FRINTN */
11707 case 0x19: /* FRINTM */
11708 case 0x38: /* FRINTP */
11709 case 0x39: /* FRINTZ */
11710 case 0x58: /* FRINTA */
11711 case 0x79: /* FRINTI */
11712 gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
11714 case 0x59: /* FRINTX */
11715 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
11717 case 0x2f: /* FABS */
11718 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
11720 case 0x6f: /* FNEG */
11721 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
11723 case 0x7d: /* FRSQRTE */
11724 gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
11726 case 0x7f: /* FSQRT */
11727 gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
11730 g_assert_not_reached();
11733 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11735 tcg_temp_free_i32(tcg_res);
11736 tcg_temp_free_i32(tcg_op);
11739 clear_vec_high(s, is_q, rd);
}
if (tcg_rmode) {
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
11744 tcg_temp_free_i32(tcg_rmode);
11747 if (tcg_fpstatus) {
11748 tcg_temp_free_ptr(tcg_fpstatus);
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (i.e. come in INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    bool is_fp = false;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (opcode) {
    case 0x0: /* MLA */
    case 0x4: /* MLS */
        if (!u || is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
    case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
    case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x3: /* SQDMLAL, SQDMLAL2 */
    case 0x7: /* SQDMLSL, SQDMLSL2 */
    case 0xb: /* SQDMULL, SQDMULL2 */
        is_long = true;
        /* fall through */
    case 0xc: /* SQDMULH */
    case 0xd: /* SQRDMULH */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x8: /* MUL */
        if (u || is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x1: /* FMLA */
    case 0x5: /* FMLS */
        if (u) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x9: /* FMUL, FMULX */
        if (size == 1) {
            unallocated_encoding(s);
            return;
        }
        is_fp = true;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (is_fp) {
        /* convert insn encoded size to TCGMemOp size */
        switch (size) {
        case 2: /* single precision */
            size = MO_32;
            index = h << 1 | l;
            rm |= m << 4;
            break;
        case 3: /* double precision */
            size = MO_64;
            if (l || !is_q) {
                unallocated_encoding(s);
                return;
            }
            index = h;
            rm |= m << 4;
            break;
        case 0: /* half precision */
            size = MO_16;
            index = h << 2 | l << 1 | m;
            is_fp16 = true;
            if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
                break;
            }
            /* fallthru */
        default: /* unallocated */
            unallocated_encoding(s);
            return;
        }
    } else {
        switch (size) {
        case 1:
            index = h << 2 | l << 1 | m;
            break;
        case 2:
            index = h << 1 | l;
            rm |= m << 4;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }
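    /* Added summary of the index decode above: the element index is
     * assembled from the H:L:M bits, using as many of them as the element
     * size allows, and the leftover M bit widens Rm back to the usual
     * 5-bit register number for the larger element sizes:
     *
     *   16-bit elements: index = H:L:M (0..7), Rm stays 4 bits (V0-V15)
     *   32-bit elements: index = H:L   (0..3), rm |= m << 4
     *   64-bit elements: index = H     (0..1), rm |= m << 4
     */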
    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr(is_fp16);
    } else {
        fpst = NULL;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (opcode) {
            case 0x5: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x1: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x9: /* FMUL, FMULX */
                if (u) {
                    gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                } else {
                    gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                }
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
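            /* Added illustration (not in the original): the deposit above
             * writes tcg_idx's low half into its own bits [31:16], e.g.
             * 0x0000abcd -> 0xabcdabcd, so a single 32-bit Neon helper call
             * multiplies both 16-bit lanes by the same indexed element.
             */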
        }

        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (opcode) {
            case 0x0: /* MLA */
            case 0x4: /* MLS */
            case 0x8: /* MUL */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x5: /* FMLS */
            case 0x1: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x9: /* FMUL, FMULX */
                switch (size) {
                case 1:
                    if (u) {
                        if (is_scalar) {
                            gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                     tcg_idx, fpst);
                        } else {
                            gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                      tcg_idx, fpst);
                        }
                    } else {
                        if (is_scalar) {
                            gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                    tcg_idx, fpst);
                        } else {
                            gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                     tcg_idx, fpst);
                        }
                    }
                    break;
                case 2:
                    if (u) {
                        gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    } else {
                        gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0xc: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0xd: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
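            /* Added note: the "saturating, doubling" step above implements
             * 2*x as the saturating addition x + x, e.g. the SQDMULL product
             * 0x80000000 * 0x80000000 = 0x4000000000000000 doubles to the
             * saturated 0x7fffffffffffffff, setting QC as architected.
             */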
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();

            assert(size == 1);
            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
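/* Added note on the long ops handled above: the INSN/INSN2 pairs (e.g.
 * SMLAL vs SMLAL2) are distinguished only by Q, via
 * passelt = pass + (is_q * 2), so Q=1 sources the upper half of the input
 * vector while the destination is always the full 128 bits.
 */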
/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    TCGv_i32 tcg_decrypt;
    CryptoThreeOpIntFn *genfn;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
        || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_decrypt = tcg_const_i32(decrypt);

    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_i32(tcg_decrypt);
}
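/* Added note: AESE/AESD and AESMC/AESIMC deliberately share one helper
 * each, with the tcg_decrypt flag selecting the forward or inverse
 * transform, e.g.:
 *
 *   AESE Vd, Vn -> gen_helper_crypto_aese(rd, rn, 0)
 *   AESD Vd, Vn -> gen_helper_crypto_aese(rd, rn, 1)
 */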
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    int feature = ARM_FEATURE_V8_SHA256;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        genfn = NULL;
        feature = ARM_FEATURE_V8_SHA1;
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}
/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoTwoOpFn *genfn;
    int feature;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = ARM_FEATURE_V8_SHA256;
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}
/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL;
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        /* RAX1 */
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
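/* Added note: the genfn == NULL branch above open-codes RAX1, which per
 * 64-bit lane computes
 *
 *   Vd.2D[i] = Vn.2D[i] ^ ROL64(Vm.2D[i], 1)
 *
 * using tcg_gen_rotli_i64 + tcg_gen_xor_i64 rather than a helper call.
 */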
/* Crypto two-reg SHA512
 *  31                                     12 11 10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    int feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = ARM_FEATURE_V8_SHA512;
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = ARM_FEATURE_V8_SM4;
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        /* SM3SS1 */
        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        /* clear the other lanes and write the result word */
        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
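/* Added note: the SM3SS1 branch above reads only word 3 of each source
 * (rotri by 20/25 is rotli by 12/7) and computes
 *
 *   Vd.4S[3] = ROL32(ROL32(Vn.4S[3], 12) + Vm.4S[3] + Va.4S[3], 7)
 *
 * zeroing words 0..2 of the destination.
 */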
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
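/* Added note: per 64-bit lane, XAR computes
 *
 *   Vd.2D[i] = ROR64(Vn.2D[i] ^ Vm.2D[i], imm6)
 *
 * so imm6 == 0 degenerates to a plain EOR of the two registers.
 */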
/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11 10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2 = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
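/* Added commentary: lookup_disas_fn (defined earlier in this file) walks
 * this table in order and returns the first entry satisfying
 *
 *   (insn & mask) == pattern
 *
 * so more specific patterns must come first - hence simd_mod_imm, whose
 * encodings are a subset of simd_shift_imm, precedes it. The all-zero
 * mask entry terminates the search.
 */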
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
    s->insn = insn;
    s->pc += 4;

    s->fp_access_checked = false;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
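/* Added note: the switch above keys on insn[28:25], the "op0" field of the
 * A64 top-level encoding table, so e.g. any load/store (op0 = x1x0, i.e.
 * 0x4/0x6/0xc/0xe) reaches disas_ldst() regardless of its other bits.
 */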
static int aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                         CPUState *cpu, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = arm_env_get_cpu(env);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;

    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    max_insns = MIN(max_insns, bound);

    init_tmp_a64_array(dc);

    return max_insns;
}
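/* Added worked example for the bound computation above: with 4KiB pages
 * TARGET_PAGE_MASK is ~0xfff, so for pc_first = 0x...1ff0,
 * -(0x...1ff0 | ~0xfff) = 0x10 and 0x10 / 4 = 4: exactly the four 4-byte
 * insns left before the page boundary, so a TB never crosses a page.
 */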
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    tcg_clear_temp_count();
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc, 0, 0);
    dc->insn_start = tcg_last_op();
}

static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_a64_set_pc_im(dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        dc->pc += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_EXIT:
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
        case DISAS_SWI:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* This is a special case because we don't want to just halt the CPU
             * if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4);

            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start           = aarch64_tr_tb_start,
    .insn_start         = aarch64_tr_insn_start,
    .breakpoint_check   = aarch64_tr_breakpoint_check,
    .translate_insn     = aarch64_tr_translate_insn,
    .tb_stop            = aarch64_tr_tb_stop,
    .disas_log          = aarch64_tr_disas_log,
};