 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "qemu/host-utils.h"

#include "exec/semihost.h"
#include "exec/gen-icount.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;

/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_high;
static TCGv_i64 cpu_reg(DisasContext *s, int reg);

static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};

enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};

/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);

typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, TCGMemOp);

/* Note that the gvec expanders operate on offsets + sizes.  */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/* initialize TCG globals.  */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
}

static inline int get_a64_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A64 "unprivileged load/store" insns:
     *  if EL1, access as if EL0; otherwise access at current EL
     */
    ARMMMUIdx useridx;

    switch (s->mmu_idx) {
    case ARMMMUIdx_S12NSE1:
        useridx = ARMMMUIdx_S12NSE0;
        break;
    case ARMMMUIdx_S1SE1:
        useridx = ARMMMUIdx_S1SE0;
        break;
    case ARMMMUIdx_S2NS:
        g_assert_not_reached();
    default:
        useridx = s->mmu_idx;
        break;
    }
    return arm_to_core_mmu_idx(useridx);
}

void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-',
                ns_status,
                el,
                psr & PSTATE_SP ? 'h' : 't');

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            uint64_t vlo = q[0];
            uint64_t vhi = q[1];
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "%c",
                        i, vhi, vlo, (i & 1 ? '\n' : ' '));
        }
        cpu_fprintf(f, "FPCR: %08x  FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}

void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}

/* Load the PC from a generic TCG variable.
 *
 * If address tagging is enabled via the TCR TBI bits, then loading
 * an address into the PC will clear out any tag in it:
 *  + for EL2 and EL3 there is only one TBI bit, and if it is set
 *    then the address is zero-extended, clearing bits [63:56]
 *  + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
 *    and TBI1 controls addresses with bit 55 == 1.
 *    If the appropriate TBI bit is set for the address then
 *    the address is sign-extended from bit 55 into bits [63:56]
 *
 * We can avoid doing this for relative-branches, because the
 * PC + offset can never overflow into the tag bits (assuming
 * that virtual addresses are less than 56 bits wide, as they
 * are currently), but we must handle it for branch-to-register.
 */
static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
{
    if (s->current_el <= 1) {
        /* Test if NEITHER or BOTH TBI values are set.  If so, no need to
         * examine bit 55 of address, can just generate code.
         * If mixed, then test via generated code
         */
        if (s->tbi0 && s->tbi1) {
            TCGv_i64 tmp_reg = tcg_temp_new_i64();
            /* Both bits set, sign extension from bit 55 into [63:56] will
             * cover both cases
             */
            tcg_gen_shli_i64(tmp_reg, src, 8);
            tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
            tcg_temp_free_i64(tmp_reg);
        } else if (!s->tbi0 && !s->tbi1) {
            /* Neither bit set, just load it as-is */
            tcg_gen_mov_i64(cpu_pc, src);
        } else {
            TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
            TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
            TCGv_i64 tcg_zero = tcg_const_i64(0);

            tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));

            if (s->tbi0) {
                /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
                tcg_gen_andi_i64(tcg_tmpval, src,
                                 0x00FFFFFFFFFFFFFFull);
                tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            } else {
                /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
                tcg_gen_ori_i64(tcg_tmpval, src,
                                0xFF00000000000000ull);
                tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
                                    tcg_tmpval, src);
            }
            tcg_temp_free_i64(tcg_zero);
            tcg_temp_free_i64(tcg_bit55);
            tcg_temp_free_i64(tcg_tmpval);
        }
    } else {  /* EL > 1 */
        if (s->tbi0) {
            /* Force tag byte to all zero */
            tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
        } else {
            /* Load unmodified address */
            tcg_gen_mov_i64(cpu_pc, src);
        }
    }
}

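/* Worked example of the tagging rules above (an illustration added for
 * clarity; the values are made up, not taken from the architecture text):
 * at EL0/EL1 with TBI1 set, an address such as 0xa5ffffffdeadbeefull has
 * bit 55 == 1, so loading it into the PC sign-extends bit 55 upwards and
 * the PC becomes 0xffffffffdeadbeefull.  With TBI0 set instead, an
 * address with bit 55 == 0 gets bits [63:56] cleared to zero.
 */
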
typedef struct DisasCompare64 {
    TCGCond cond;
    TCGv_i64 value;
} DisasCompare64;

static void a64_test_cc(DisasCompare64 *c64, int cc)
{
    DisasCompare c32;

    arm_test_cc(&c32, cc);

    /* Sign-extend the 32-bit value so that the GE/LT comparisons work
     * properly.  The NE/EQ comparisons are also fine with this choice.  */
    c64->cond = c32.cond;
    c64->value = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(c64->value, c32.value);

    arm_free_cc(&c32);
}

static void a64_free_cc(DisasCompare64 *c64)
{
    tcg_temp_free_i64(c64->value);
}

static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);
    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               uint32_t syndrome, uint32_t target_el)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp, syndrome, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, int offset,
                                    uint32_t syndrome)
{
    TCGv_i32 tcg_syn;

    gen_a64_set_pc_im(s->pc - offset);
    tcg_syn = tcg_const_i32(syndrome);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}

static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep (either QEMU's or the ARM
     * debug architecture kind) or deterministic io
     */
    if (s->base.singlestep_enabled || s->ss_active ||
        (tb_cflags(s->base.tb) & CF_LAST_IO)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* Only link tbs from inside the same guest page */
    if ((s->base.tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }
#endif

    return true;
}

static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->base.tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->ss_active) {
            gen_step_complete_exception(s);
        } else if (s->base.singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
            s->base.is_jmp = DISAS_NORETURN;
        }
    }
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0)

static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
    s->tmp_a64_count = 0;
}

static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}

static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}

static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}

/*
 * Register access functions
 *
 * These functions are used for directly accessing a register where
 * changes to the final register value are likely to be made. If you
 * need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}

/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}

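/* Illustration of the two mappings (the instruction examples here are
 * hypothetical, added for clarity): in "ADD X0, X1, XZR" the Rm field
 * of 31 means the zero register, so a decoder uses cpu_reg() and gets
 * a discardable zeroed temporary; in "ADD X0, SP, #16" the Rn field of
 * 31 means the stack pointer, so a decoder uses cpu_reg_sp() and gets
 * cpu_X[31] directly.
 */
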
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}

static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}

/* We should have at some point before trying to access an FP register
 * done the necessary access check, so assert that
 * (a) we did the check and
 * (b) we didn't then just plough ahead anyway if it failed.
 * Print the instruction pattern in the abort message so we can figure
 * out what we need to fix if a user encounters this problem in the wild.
 */
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
    }
#endif
}

/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(DisasContext *s, int regno,
                                 int element, TCGMemOp size)
{
    int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.zregs[n].d[0] is
     * still the low half and vfp.zregs[n].d[1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    offs += offsetof(CPUARMState, vfp.zregs[regno]);
    assert_fp_access_checked(s);
    return offs;
}

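/* Worked example (added for illustration): element 1 of size MO_32,
 * i.e. S[1], bits [63:32] of Qn, lives in zregs[n].d[0].  On a
 * little-endian host that is byte offset 4 within the register; on a
 * big-endian host the formula above gives 16 - 2 * 4 = 8, and the XOR
 * with 8 maps it to offset 0, which is where the high 32 bits of d[0]
 * sit in big-endian byte order.
 */
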
/* Return the offset into CPUARMState of the "whole" vector register Qn.  */
static inline int vec_full_reg_offset(DisasContext *s, int regno)
{
    assert_fp_access_checked(s);
    return offsetof(CPUARMState, vfp.zregs[regno]);
}

/* Return a newly allocated pointer to the vector register.  */
static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vec_full_reg_offset(s, regno));
    return ret;
}

/* Return the byte size of the "whole" vector register, VL / 8.  */
static inline int vec_full_reg_size(DisasContext *s)
{
    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
       In the meantime this is just the AdvSIMD length of 128.  */
    return 128 / 8;
}

/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
    return vec_reg_offset(s, regno, 0, size);
}

/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
    return vec_reg_offset(s, regno, 1, MO_64);
}

/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
    return v;
}

static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
    return v;
}

/* Clear the bits above an N-bit vector, for N = (is_q ? 128 : 64).
 * If SVE is not enabled, then there are only 128 bits in the vector.
 */
static void clear_vec_high(DisasContext *s, bool is_q, int rd)
{
    unsigned ofs = fp_reg_offset(s, rd, MO_64);
    unsigned vsz = vec_full_reg_size(s);

    if (!is_q) {
        TCGv_i64 tcg_zero = tcg_const_i64(0);
        tcg_gen_st_i64(tcg_zero, cpu_env, ofs + 8);
        tcg_temp_free_i64(tcg_zero);
    }
    if (vsz > 16) {
        tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
    }
}

static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    unsigned ofs = fp_reg_offset(s, reg, MO_64);

    tcg_gen_st_i64(v, cpu_env, ofs);
    clear_vec_high(s, false, reg);
}

static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}

static TCGv_ptr get_fpstatus_ptr(bool is_f16)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR; there
     * is no equivalent of the A32 Neon "standard FPSCR value".
     * However half-precision operations operate under a different
     * FZ16 flag and use vfp.fp_status_f16 instead of vfp.fp_status.
     */
    if (is_f16) {
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

/* Expand a 2-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                         GVecGen2Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an expander function.
 */
static void gen_gvec_fn2i(DisasContext *s, bool is_q, int rd, int rn,
                          int64_t imm, GVecGen2iFn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            imm, is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 3-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         GVecGen3Fn *gvec_fn, int vece)
{
    gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
static void gen_gvec_op2i(DisasContext *s, bool is_q, int rd,
                          int rn, int64_t imm, const GVecGen2i *gvec_op)
{
    tcg_gen_gvec_2i(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                    is_q ? 16 : 8, vec_full_reg_size(s), imm, gvec_op);
}

/* Expand a 3-operand AdvSIMD vector operation using an op descriptor.  */
static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
                         int rn, int rm, const GVecGen3 *gvec_op)
{
    tcg_gen_gvec_3(vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
                   vec_full_reg_size(s), gvec_op);
}

/* Expand a 3-operand + env pointer operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_env(DisasContext *s, bool is_q, int rd,
                             int rn, int rm, gen_helper_gvec_3_ptr *fn)
{
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), cpu_env,
                       is_q ? 16 : 8, vec_full_reg_size(s), 0, fn);
}

/* Expand a 3-operand + fpstatus pointer + simd data value operation using
 * an out-of-line helper.
 */
static void gen_gvec_op3_fpst(DisasContext *s, bool is_q, int rd, int rn,
                              int rm, bool is_fp16, int data,
                              gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr fpst = get_fpstatus_ptr(is_fp16);
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm), fpst,
                       is_q ? 16 : 8, vec_full_reg_size(s), data, fn);
    tcg_temp_free_ptr(fpst);
}

/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
    tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}

/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_extrl_i64_i32(cpu_ZF, result);
        tcg_gen_mov_i32(cpu_NF, cpu_ZF);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}

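/* Worked example of the flag convention (added for illustration): for
 * the 64-bit result 0xffffffff00000000, gen_set_NZ64 sets cpu_NF to the
 * high half (bit 31 set, so N reads as 1) and cpu_ZF to low | high =
 * 0xffffffff.  QEMU treats Z as set iff cpu_ZF == 0, so Z correctly
 * reads as 0 for this non-zero result.
 */
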
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}

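/* The V flag computation above is the standard two's-complement
 * identity (note added for illustration): overflow = (result ^ t0) &
 * ~(t0 ^ t1), i.e. the operands had the same sign but the result's
 * sign differs.  E.g. for 32-bit 0x7fffffff + 1 = 0x80000000,
 * t0 ^ t1 has bit 31 clear while result ^ t0 has bit 31 set, so
 * bit 31 of cpu_VF ends up set: overflow.
 */
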
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_extrl_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}

/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}

/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_extrh_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_extrl_i64_i32(t0_32, t0);
        tcg_gen_extrl_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}

/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory.
 */
static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
                             TCGv_i64 tcg_addr, int size, int memidx,
                             bool iss_valid,
                             unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      false,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size,
                      bool iss_valid,
                      unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Load from memory to GPR register
 */
static void do_gpr_ld_memidx(DisasContext *s,
                             TCGv_i64 dest, TCGv_i64 tcg_addr,
                             int size, bool is_signed,
                             bool extend, int memidx,
                             bool iss_valid, unsigned int iss_srt,
                             bool iss_sf, bool iss_ar)
{
    TCGMemOp memop = s->be_data + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }

    if (iss_valid) {
        uint32_t syn;

        syn = syn_data_abort_with_iss(0,
                                      size,
                                      is_signed,
                                      iss_srt,
                                      iss_sf,
                                      iss_ar,
                                      0, 0, 0, 0, 0, false);
        disas_set_insn_syndrome(s, syn);
    }
}

static void do_gpr_ld(DisasContext *s,
                      TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend,
                      bool iss_valid, unsigned int iss_srt,
                      bool iss_sf, bool iss_ar)
{
    do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
                     get_mem_index(s),
                     iss_valid, iss_srt, iss_sf, iss_ar);
}

/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
                            s->be_data + size);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
        tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}

/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = s->be_data + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        bool be = s->be_data == MO_BE;
        TCGv_i64 tcg_hiaddr;

        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
                            s->be_data | MO_Q);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);

    clear_vec_high(s, true, destidx);
}

/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */

/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}

/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = s->be_data + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}

/* Check that FP/Neon access is enabled. If it is, return
 * true. If not, emit code to generate an appropriate exception,
 * and return false; the caller should not emit any code for
 * the instruction. Note that this check must happen after all
 * unallocated-encoding checks (otherwise the syndrome information
 * for the resulting exception will be incorrect).
 */
static inline bool fp_access_check(DisasContext *s)
{
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
                       s->fp_excp_el);
    return false;
}

/* Check that SVE access is enabled.  If it is, return true.
 * If not, emit code to generate an appropriate exception and return false.
 */
static inline bool sve_access_check(DisasContext *s)
{
    if (s->sve_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_sve_access_trap(),
                           s->sve_excp_el);
        return false;
    }
    return true;
}

/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}

static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}

/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}

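/* Hypothetical usage sketch (added for illustration; the table contents
 * and handler name below are made up, not entries from this file):
 *
 *     static const AArch64DecodeTable example_table[] = {
 *         { 0x0e200400, 0x9f200400, disas_some_simd_group },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 *     AArch64DecodeFn *fn = lookup_disas_fn(&example_table[0], insn);
 *     if (fn) {
 *         fn(s, insn);
 *     } else {
 *         unallocated_encoding(s);
 *     }
 *
 * The all-zero mask on the final entry terminates the linear search.
 */
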
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter C4
 * of the ARM Architecture Reference Manual (DDI0487B_a);
 * classification names and decode diagrams here should generally
 * match up with those in the manual.
 */

/* Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1U << 31)) {
        /* BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* B Branch / BL Branch with link */
    gen_goto_tb(s, 0, addr);
}

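/* Worked example (added for illustration): s->pc has already been
 * advanced past this 4-byte insn, so for a B at guest address 0x1000
 * with imm26 == 2, addr = (0x1000 + 4) + 2 * 4 - 4 = 0x1008; that is,
 * the target is insn_address + imm26 * 4, as the architecture requires.
 */
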
/* Compare and branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Test and branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    TCGLabel *label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}

/* Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        TCGLabel *label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}

/* HINT instruction group, including various allocated HINTs */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}

static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    TCGBar bar;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
        switch (crm & 3) {
        case 1: /* MBReqTypes_Reads */
            bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
            break;
        case 2: /* MBReqTypes_Writes */
            bar = TCG_BAR_SC | TCG_MO_ST_ST;
            break;
        default: /* MBReqTypes_All */
            bar = TCG_BAR_SC | TCG_MO_ALL;
            break;
        }
        tcg_gen_mb(bar);
        return;
    case 6: /* ISB */
        /* We need to break the TB after this insn to execute
         * self-modifying code correctly and also to take
         * any pending interrupts immediately.
         */
        gen_goto_tb(s, 0, s->pc);
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}

/* MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    int op = op1 << 3 | op2;
    switch (op) {
    case 0x05: /* SPSel */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        /* fall through */
    case 0x1e: /* DAIFSet */
    case 0x1f: /* DAIFClear */
    {
        TCGv_i32 tcg_imm = tcg_const_i32(crm);
        TCGv_i32 tcg_op = tcg_const_i32(op);
        gen_a64_set_pc_im(s->pc - 4);
        gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
        tcg_temp_free_i32(tcg_imm);
        tcg_temp_free_i32(tcg_op);
        /* For DAIFClear, exit the cpu loop to re-evaluate pending IRQs.  */
        gen_a64_set_pc_im(s->pc);
        s->base.is_jmp = (op == 0x1f ? DISAS_EXIT : DISAS_JUMP);
        break;
    }
    default:
        unallocated_encoding(s);
        return;
    }
}

static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}

static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}

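/* Worked example (added for illustration): writing nzcv = 0x60000000
 * (Z and C set) through gen_set_nzcv yields
 * cpu_ZF = ((nzcv & (1 << 30)) == 0) = 0 (Z reads as set, since Z is
 * set iff cpu_ZF == 0) and cpu_CF = (nzcv >> 29) & 1 = 1;
 * gen_get_nzcv then reassembles the same 0x60000000 from those flag
 * variables, so the pair round-trips.
 */
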
/* MRS - move from system register
 * MSR (register) - move to system register
 * SYS
 * SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register; this might be a guest error or a QEMU
         * unimplemented feature.
         */
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
                      "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
                      isread ? "read" : "write", op0, op1, crn, crm, op2);
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_el, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    if (ri->accessfn) {
        /* Emit code to perform further access permissions checks at
         * runtime; this may result in an exception.
         */
        TCGv_ptr tmpptr;
        TCGv_i32 tcg_syn, tcg_isread;
        uint32_t syndrome;

        gen_a64_set_pc_im(s->pc - 4);
        tmpptr = tcg_const_ptr(ri);
        syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
        tcg_syn = tcg_const_i32(syndrome);
        tcg_isread = tcg_const_i32(isread);
        gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
        tcg_temp_free_ptr(tmpptr);
        tcg_temp_free_i32(tcg_syn);
        tcg_temp_free_i32(tcg_isread);
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    case ARM_CP_CURRENTEL:
        /* Reads as current EL value from pstate, which is
         * guaranteed to be constant by the tb flags.
         */
        tcg_rt = cpu_reg(s, rt);
        tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
        return;
    case ARM_CP_DC_ZVA:
        /* Writes clear the aligned block of memory which rt points into. */
        tcg_rt = cpu_reg(s, rt);
        gen_helper_dc_zva(cpu_env, tcg_rt);
        return;
    default:
        break;
    }
    if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) {
        return;
    }
    if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) {
        return;
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->base.is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->base.is_jmp = DISAS_UPDATE;
    }
}

/* System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* HINT (including allocated hints like NOP, YIELD, etc) */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}

/* Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);
    int imm16 = extract32(insn, 5, 16);
    TCGv_i32 tmp;

    switch (opc) {
    case 0:
        /* For SVC, HVC and SMC we advance the single-step state
         * machine before taking the exception. This is architecturally
         * mandated, to ensure that single-stepping a system call
         * instruction works properly.
         */
        switch (op2_ll) {
        case 1:                                                     /* SVC */
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
                               default_exception_el(s));
            break;
        case 2:                                                     /* HVC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            /* The pre HVC helper handles cases when HVC gets trapped
             * as an undefined insn by runtime configuration.
             */
            gen_a64_set_pc_im(s->pc - 4);
            gen_helper_pre_hvc(cpu_env);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
            break;
        case 3:                                                     /* SMC */
            if (s->current_el == 0) {
                unallocated_encoding(s);
                break;
            }
            gen_a64_set_pc_im(s->pc - 4);
            tmp = tcg_const_i32(syn_aa64_smc(imm16));
            gen_helper_pre_smc(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_ss_advance(s);
            gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_bkpt_insn(s, 4, syn_aa64_bkpt(imm16));
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT. This has two purposes.
         * Architecturally, it is an external halting debug instruction.
         * Since QEMU doesn't implement external debug, we treat this as
         * it is required for halting debug disabled: it will UNDEF.
         * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
         */
        if (semihosting_enabled() && imm16 == 0xf000) {
#ifndef CONFIG_USER_ONLY
            /* In system mode, don't allow userspace access to semihosting,
             * to provide some semblance of security (and for consistency
             * with our 32-bit semihosting).
             */
            if (s->current_el == 0) {
                unsupported_encoding(s, insn);
                break;
            }
#endif
            gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        } else {
            unsupported_encoding(s, insn);
        }
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/* Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 1: /* BLR */
    case 2: /* RET */
        gen_a64_set_pc(s, cpu_reg(s, rn));
        /* BLR also needs to load return address */
        if (opc == 1) {
            tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        }
        break;
    case 4: /* ERET */
        if (s->current_el == 0) {
            unallocated_encoding(s);
            return;
        }
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_exception_return(cpu_env);
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        /* Must exit loop to check un-masked IRQs */
        s->base.is_jmp = DISAS_EXIT;
        return;
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    s->base.is_jmp = DISAS_JUMP;
}

/* Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}

/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * The store exclusive uses the atomic cmpxchg primitives to avoid
 * races in multi-threaded linux-user and when MTTCG softmmu is
 * enabled.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    int idx = get_mem_index(s);
    TCGMemOp memop = s->be_data;

    g_assert(size <= 3);
    if (is_pair) {
        g_assert(size >= 2);
        if (size == 2) {
            /* The pair must be single-copy atomic for the doubleword.  */
            memop |= MO_64 | MO_ALIGN;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
            if (s->be_data == MO_LE) {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 32, 32);
            } else {
                tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 32, 32);
                tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
            }
        } else {
            /* The pair must be single-copy atomic for *each* doubleword, not
               the entire quadword, however it must be quadword aligned.  */
            memop |= MO_64;
            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
                                memop | MO_ALIGN_16);

            TCGv_i64 addr2 = tcg_temp_new_i64();
            tcg_gen_addi_i64(addr2, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_exclusive_high, addr2, idx, memop);
            tcg_temp_free_i64(addr2);

            tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
            tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
        }
    } else {
        memop |= size | MO_ALIGN;
        tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
        tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
     *     && (!is_pair || env->exclusive_high == [addr + datasize])) {
     *     [addr] = {Rt};
     *     if (is_pair) {
     *         [addr + datasize] = {Rt2};
     *     }
     *     {Rd} = 0;
     * } else {
     *     {Rd} = 1;
     * }
     * env->exclusive_addr = -1;
     */
    TCGLabel *fail_label = gen_new_label();
    TCGLabel *done_label = gen_new_label();
    TCGv_i64 tmp;

    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);

    tmp = tcg_temp_new_i64();
    if (is_pair) {
        if (size == 2) {
            if (s->be_data == MO_LE) {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
            } else {
                tcg_gen_concat32_i64(tmp, cpu_reg(s, rt2), cpu_reg(s, rt));
            }
            tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr,
                                       cpu_exclusive_val, tmp,
                                       get_mem_index(s),
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
                                   size | MO_ALIGN | s->be_data);
        tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
    }
    tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i64(cpu_reg(s, rd), 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}

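/* Note on the single-register fast path above (added for illustration):
 * the exclusive check folds into one cmpxchg.  The cmpxchg writes Rt
 * only if [addr] still equals cpu_exclusive_val and returns the old
 * memory value in tmp, so tmp != cpu_exclusive_val afterwards exactly
 * when the location changed since the load-exclusive; the setcond then
 * makes Rd 1 (failure) or 0 (success) accordingly.
 */
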
static void gen_compare_and_swap(DisasContext *s, int rs, int rt,
                                 int rn, int size)
{
    TCGv_i64 tcg_rs = cpu_reg(s, rs);
    TCGv_i64 tcg_rt = cpu_reg(s, rt);
    int memidx = get_mem_index(s);
    TCGv_i64 addr = cpu_reg_sp(s, rn);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_gen_atomic_cmpxchg_i64(tcg_rs, addr, tcg_rs, tcg_rt, memidx,
                               size | MO_ALIGN | s->be_data);
}

static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt,
                                      int rn, int size)
{
    TCGv_i64 s1 = cpu_reg(s, rs);
    TCGv_i64 s2 = cpu_reg(s, rs + 1);
    TCGv_i64 t1 = cpu_reg(s, rt);
    TCGv_i64 t2 = cpu_reg(s, rt + 1);
    TCGv_i64 addr = cpu_reg_sp(s, rn);
    int memidx = get_mem_index(s);

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    if (size == 2) {
        TCGv_i64 cmp = tcg_temp_new_i64();
        TCGv_i64 val = tcg_temp_new_i64();

        if (s->be_data == MO_LE) {
            tcg_gen_concat32_i64(val, t1, t2);
            tcg_gen_concat32_i64(cmp, s1, s2);
        } else {
            tcg_gen_concat32_i64(val, t2, t1);
            tcg_gen_concat32_i64(cmp, s2, s1);
        }

        tcg_gen_atomic_cmpxchg_i64(cmp, addr, cmp, val, memidx,
                                   MO_64 | MO_ALIGN | s->be_data);
        tcg_temp_free_i64(val);

        if (s->be_data == MO_LE) {
            tcg_gen_extr32_i64(s1, s2, cmp);
        } else {
            tcg_gen_extr32_i64(s2, s1, cmp);
        }
        tcg_temp_free_i64(cmp);
    } else if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        TCGv_i32 tcg_rs = tcg_const_i32(rs);

        if (s->be_data == MO_LE) {
            gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2);
        } else {
            gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2);
        }
        tcg_temp_free_i32(tcg_rs);
    } else {
        TCGv_i64 d1 = tcg_temp_new_i64();
        TCGv_i64 d2 = tcg_temp_new_i64();
        TCGv_i64 a2 = tcg_temp_new_i64();
        TCGv_i64 c1 = tcg_temp_new_i64();
        TCGv_i64 c2 = tcg_temp_new_i64();
        TCGv_i64 zero = tcg_const_i64(0);

        /* Load the two words, in memory order.  */
        tcg_gen_qemu_ld_i64(d1, addr, memidx,
                            MO_64 | MO_ALIGN_16 | s->be_data);
        tcg_gen_addi_i64(a2, addr, 8);
        tcg_gen_qemu_ld_i64(d2, a2, memidx, MO_64 | s->be_data);

        /* Compare the two words, also in memory order.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, c1, d1, s1);
        tcg_gen_setcond_i64(TCG_COND_EQ, c2, d2, s2);
        tcg_gen_and_i64(c2, c2, c1);

        /* If compare equal, write back new data, else write back old data. */
        tcg_gen_movcond_i64(TCG_COND_NE, c1, c2, zero, t1, d1);
        tcg_gen_movcond_i64(TCG_COND_NE, c2, c2, zero, t2, d2);
        tcg_gen_qemu_st_i64(c1, addr, memidx, MO_64 | s->be_data);
        tcg_gen_qemu_st_i64(c2, a2, memidx, MO_64 | s->be_data);
        tcg_temp_free_i64(a2);
        tcg_temp_free_i64(c1);
        tcg_temp_free_i64(c2);
        tcg_temp_free_i64(zero);

        /* Write back the data from memory to Rs.  */
        tcg_gen_mov_i64(s1, d1);
        tcg_gen_mov_i64(s2, d2);
        tcg_temp_free_i64(d1);
        tcg_temp_free_i64(d2);
    }
}

2214 /* Update the Sixty-Four bit (SF) register size. This logic is derived
2215 * from the ARMv8 specs for LDR (Shared decode for all encodings).
2217 static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
2219 int opc0 = extract32(opc, 0, 1);
2223 regsize = opc0 ? 32 : 64;
2225 regsize = size == 3 ? 64 : 32;
2227 return regsize == 64;
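    /* Illustrative decodes: LDRSW Xt (signed, opc = 10) has opc0 = 0 and
     * hence a 64-bit register size (iss_sf true), LDRSH Wt (signed,
     * opc = 11) has opc0 = 1 and a 32-bit size, while for unsigned loads
     * only size = 3 (64-bit LDR) sets iss_sf.
     */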
2230 /* Load/store exclusive
2232 * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
2233 * +-----+-------------+----+---+----+------+----+-------+------+------+
2234 * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
2235 * +-----+-------------+----+---+----+------+----+-------+------+------+
2237 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2238 * L: 0 -> store, 1 -> load
2239 * o2: 0 -> exclusive, 1 -> not
2240 * o1: 0 -> single register, 1 -> register pair
2241 * o0: 1 -> load-acquire/store-release, 0 -> not
2243 static void disas_ldst_excl(DisasContext *s, uint32_t insn)
2245 int rt = extract32(insn, 0, 5);
2246 int rn = extract32(insn, 5, 5);
2247 int rt2 = extract32(insn, 10, 5);
2248 int rs = extract32(insn, 16, 5);
2249 int is_lasr = extract32(insn, 15, 1);
2250 int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | is_lasr;
2251 int size = extract32(insn, 30, 2);
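    /* The 4-bit key is o2:L:o1:o0; e.g. STLXR (o2 = 0, L = 0, o1 = 0,
     * o0 = 1) produces 0x1 and LDAXR (o2 = 0, L = 1, o1 = 0, o0 = 1)
     * produces 0x5, matching the case labels below.
     */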
2254 switch (o2_L_o1_o0) {
2255 case 0x0: /* STXR */
2256 case 0x1: /* STLXR */
2258 gen_check_sp_alignment(s);
2261 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2263 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2264 gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, false);
2267 case 0x4: /* LDXR */
2268 case 0x5: /* LDAXR */
2270 gen_check_sp_alignment(s);
2272 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2274 gen_load_exclusive(s, rt, rt2, tcg_addr, size, false);
2276 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2280 case 0x9: /* STLR */
2281 /* Generate ISS for non-exclusive accesses including LASR. */
2283 gen_check_sp_alignment(s);
2285 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2286 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2287 do_gpr_st(s, cpu_reg(s, rt), tcg_addr, size, true, rt,
2288 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2291 case 0xd: /* LDAR */
2292 /* Generate ISS for non-exclusive accesses including LASR. */
2294 gen_check_sp_alignment(s);
2296 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2297 do_gpr_ld(s, cpu_reg(s, rt), tcg_addr, size, false, false, true, rt,
2298 disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
2299 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2302 case 0x2: case 0x3: /* CASP / STXP */
2303 if (size & 2) { /* STXP / STLXP */
2305 gen_check_sp_alignment(s);
2308 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
2310 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2311 gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, true);
2315 && ((rt | rs) & 1) == 0
2316 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2318 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2323 case 0x6: case 0x7: /* CASPA / LDXP */
2324 if (size & 2) { /* LDXP / LDAXP */
2326 gen_check_sp_alignment(s);
2328 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2330 gen_load_exclusive(s, rt, rt2, tcg_addr, size, true);
2332 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
2337 && ((rt | rs) & 1) == 0
2338 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2339 /* CASPA / CASPAL */
2340 gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
2346 case 0xb: /* CASL */
2347 case 0xe: /* CASA */
2348 case 0xf: /* CASAL */
2349 if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
2350 gen_compare_and_swap(s, rs, rt, rn, size);
2355 unallocated_encoding(s);
2359 * Load register (literal)
2361 * 31 30 29 27 26 25 24 23 5 4 0
2362 * +-----+-------+---+-----+-------------------+-------+
2363 * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
2364 * +-----+-------+---+-----+-------------------+-------+
2366 * V: 1 -> vector (simd/fp)
2367 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
2368 * 10 -> 32 bit signed, 11 -> prefetch
2369 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
2371 static void disas_ld_lit(DisasContext *s, uint32_t insn)
2373 int rt = extract32(insn, 0, 5);
2374 int64_t imm = sextract32(insn, 5, 19) << 2;
2375 bool is_vector = extract32(insn, 26, 1);
2376 int opc = extract32(insn, 30, 2);
2377 bool is_signed = false;
2379 TCGv_i64 tcg_rt, tcg_addr;
2383 unallocated_encoding(s);
2387 if (!fp_access_check(s)) {
2392 /* PRFM (literal) : prefetch */
2395 size = 2 + extract32(opc, 0, 1);
2396 is_signed = extract32(opc, 1, 1);
2399 tcg_rt = cpu_reg(s, rt);
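    /* s->pc has already been advanced past this 4-byte instruction, so the
     * literal address is (s->pc - 4) + imm; e.g. imm19 = 1 loads from the
     * word immediately following the instruction.
     */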
2401 tcg_addr = tcg_const_i64((s->pc - 4) + imm);
2403 do_fp_ld(s, rt, tcg_addr, size);
2405 /* Only unsigned 32bit loads target 32bit registers. */
2406 bool iss_sf = opc != 0;
2408 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
2409 true, rt, iss_sf, false);
2411 tcg_temp_free_i64(tcg_addr);
2415 * LDNP (Load Pair - non-temporal hint)
2416 * LDP (Load Pair - non vector)
2417 * LDPSW (Load Pair Signed Word - non vector)
2418 * STNP (Store Pair - non-temporal hint)
2419 * STP (Store Pair - non vector)
2420 * LDNP (Load Pair of SIMD&FP - non-temporal hint)
2421 * LDP (Load Pair of SIMD&FP)
2422 * STNP (Store Pair of SIMD&FP - non-temporal hint)
2423 * STP (Store Pair of SIMD&FP)
2425 * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
2426 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2427 * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
2428 * +-----+-------+---+---+-------+---+-------+-------+------+------+
2430 * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
2432 * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
2433 * V: 0 -> GPR, 1 -> Vector
2434 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
2435 * 10 -> signed offset, 11 -> pre-index
2436 * L: 0 -> Store 1 -> Load
2438 * Rt, Rt2 = GPR or SIMD registers to be stored
2439 * Rn = general purpose register containing address
2440 * imm7 = signed offset (multiple of 4 or 8 depending on size)
2442 static void disas_ldst_pair(DisasContext *s, uint32_t insn)
2444 int rt = extract32(insn, 0, 5);
2445 int rn = extract32(insn, 5, 5);
2446 int rt2 = extract32(insn, 10, 5);
2447 uint64_t offset = sextract64(insn, 15, 7);
2448 int index = extract32(insn, 23, 2);
2449 bool is_vector = extract32(insn, 26, 1);
2450 bool is_load = extract32(insn, 22, 1);
2451 int opc = extract32(insn, 30, 2);
2453 bool is_signed = false;
2454 bool postindex = false;
2457 TCGv_i64 tcg_addr; /* calculated address */
2461 unallocated_encoding(s);
2468 size = 2 + extract32(opc, 1, 1);
2469 is_signed = extract32(opc, 0, 1);
2470 if (!is_load && is_signed) {
2471 unallocated_encoding(s);
2477 case 1: /* post-index */
2482 /* signed offset with "non-temporal" hint. Since we don't emulate
2483 * caches we don't care about hints to the cache system about
2484 * data access patterns, and handle this identically to plain
2488 /* There is no non-temporal-hint version of LDPSW */
2489 unallocated_encoding(s);
2494 case 2: /* signed offset, rn not updated */
2497 case 3: /* pre-index */
2503 if (is_vector && !fp_access_check(s)) {
2510 gen_check_sp_alignment(s);
2513 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2516 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2521 do_fp_ld(s, rt, tcg_addr, size);
2523 do_fp_st(s, rt, tcg_addr, size);
2525 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2527 do_fp_ld(s, rt2, tcg_addr, size);
2529 do_fp_st(s, rt2, tcg_addr, size);
2532 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2533 TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
2536 TCGv_i64 tmp = tcg_temp_new_i64();
2538 /* Do not modify tcg_rt before recognizing any exception
2539 * from the second load.
2541 do_gpr_ld(s, tmp, tcg_addr, size, is_signed, false,
2542 false, 0, false, false);
2543 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2544 do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
2545 false, 0, false, false);
2547 tcg_gen_mov_i64(tcg_rt, tmp);
2548 tcg_temp_free_i64(tmp);
2550 do_gpr_st(s, tcg_rt, tcg_addr, size,
2551 false, 0, false, false);
2552 tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
2553 do_gpr_st(s, tcg_rt2, tcg_addr, size,
2554 false, 0, false, false);
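    /* For writeback, tcg_addr currently points at the second element, i.e.
     * (1 << size) past the transfer address; post-index therefore adds
     * offset - (1 << size) and pre-index subtracts (1 << size) back out.
     * E.g. a post-indexed "STP x0, x1, [sp], #16" leaves tcg_addr at
     * sp + 8 and writes back sp + 16.
     */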
2560 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
2562 tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
2564 tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
2569 * Load/store (immediate post-indexed)
2570 * Load/store (immediate pre-indexed)
2571 * Load/store (unscaled immediate)
2573 * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
2574 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2575 * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
2576 * +----+-------+---+-----+-----+---+--------+-----+------+------+
2578 * idx = 01 -> post-indexed, 11 -> pre-indexed, 00 -> unscaled imm. (no writeback)
2580 * V = 0 -> non-vector
2581 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2582 * opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2584 static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
2590 int rn = extract32(insn, 5, 5);
2591 int imm9 = sextract32(insn, 12, 9);
2592 int idx = extract32(insn, 10, 2);
2593 bool is_signed = false;
2594 bool is_store = false;
2595 bool is_extended = false;
2596 bool is_unpriv = (idx == 2);
2597 bool iss_valid = !is_vector;
2604 size |= (opc & 2) << 1;
2605 if (size > 4 || is_unpriv) {
2606 unallocated_encoding(s);
2609 is_store = ((opc & 1) == 0);
2610 if (!fp_access_check(s)) {
2614 if (size == 3 && opc == 2) {
2615 /* PRFM - prefetch */
2617 unallocated_encoding(s);
2622 if (opc == 3 && size > 1) {
2623 unallocated_encoding(s);
2626 is_store = (opc == 0);
2627 is_signed = extract32(opc, 1, 1);
2628 is_extended = (size < 3) && extract32(opc, 0, 1);
2646 g_assert_not_reached();
2650 gen_check_sp_alignment(s);
2652 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2655 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2660 do_fp_st(s, rt, tcg_addr, size);
2662 do_fp_ld(s, rt, tcg_addr, size);
2665 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2666 int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
2667 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2670 do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
2671 iss_valid, rt, iss_sf, false);
2673 do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
2674 is_signed, is_extended, memidx,
2675 iss_valid, rt, iss_sf, false);
2680 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
2682 tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
2684 tcg_gen_mov_i64(tcg_rn, tcg_addr);
2689 * Load/store (register offset)
2691 * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
2692 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2693 * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
2694 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
2697 * size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2698 * opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2700 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2701 * opc<0>: 0 -> store, 1 -> load
2702 * V: 1 -> vector/simd
2703 * opt: extend encoding (see DecodeRegExtend)
2704 * S: if S=1 then scale (essentially index by sizeof(size))
2705 * Rt: register to transfer into/out of
2706 * Rn: address register or SP for base
2707 * Rm: offset register or ZR for offset
2709 static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
2715 int rn = extract32(insn, 5, 5);
2716 int shift = extract32(insn, 12, 1);
2717 int rm = extract32(insn, 16, 5);
2718 int opt = extract32(insn, 13, 3);
2719 bool is_signed = false;
2720 bool is_store = false;
2721 bool is_extended = false;
2726 if (extract32(opt, 1, 1) == 0) {
2727 unallocated_encoding(s);
2732 size |= (opc & 2) << 1;
2734 unallocated_encoding(s);
2737 is_store = !extract32(opc, 0, 1);
2738 if (!fp_access_check(s)) {
2742 if (size == 3 && opc == 2) {
2743 /* PRFM - prefetch */
2746 if (opc == 3 && size > 1) {
2747 unallocated_encoding(s);
2750 is_store = (opc == 0);
2751 is_signed = extract32(opc, 1, 1);
2752 is_extended = (size < 3) && extract32(opc, 0, 1);
2756 gen_check_sp_alignment(s);
2758 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2760 tcg_rm = read_cpu_reg(s, rm, 1);
2761 ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
2763 tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
2767 do_fp_st(s, rt, tcg_addr, size);
2769 do_fp_ld(s, rt, tcg_addr, size);
2772 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2773 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2775 do_gpr_st(s, tcg_rt, tcg_addr, size,
2776 true, rt, iss_sf, false);
2778 do_gpr_ld(s, tcg_rt, tcg_addr, size,
2779 is_signed, is_extended,
2780 true, rt, iss_sf, false);
2786 * Load/store (unsigned immediate)
2788 * 31 30 29 27 26 25 24 23 22 21 10 9 5
2789 * +----+-------+---+-----+-----+------------+-------+------+
2790 * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
2791 * +----+-------+---+-----+-----+------------+-------+------+
2794 * size: 00 -> byte, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
2795 * opc: 00 -> store, 01 -> unsigned load, 10 -> signed load (64 bit), 11 -> signed load (32 bit)
2797 * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
2798 * opc<0>: 0 -> store, 1 -> load
2799 * Rn: base address register (inc SP)
2800 * Rt: target register
2802 static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
2808 int rn = extract32(insn, 5, 5);
2809 unsigned int imm12 = extract32(insn, 10, 12);
2810 unsigned int offset;
2815 bool is_signed = false;
2816 bool is_extended = false;
2819 size |= (opc & 2) << 1;
2821 unallocated_encoding(s);
2824 is_store = !extract32(opc, 0, 1);
2825 if (!fp_access_check(s)) {
2829 if (size == 3 && opc == 2) {
2830 /* PRFM - prefetch */
2833 if (opc == 3 && size > 1) {
2834 unallocated_encoding(s);
2837 is_store = (opc == 0);
2838 is_signed = extract32(opc, 1, 1);
2839 is_extended = (size < 3) && extract32(opc, 0, 1);
2843 gen_check_sp_alignment(s);
2845 tcg_addr = read_cpu_reg_sp(s, rn, 1);
2846 offset = imm12 << size;
2847 tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
2851 do_fp_st(s, rt, tcg_addr, size);
2853 do_fp_ld(s, rt, tcg_addr, size);
2856 TCGv_i64 tcg_rt = cpu_reg(s, rt);
2857 bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
2859 do_gpr_st(s, tcg_rt, tcg_addr, size,
2860 true, rt, iss_sf, false);
2862 do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
2863 true, rt, iss_sf, false);
2868 /* Atomic memory operations
2870 * 31 30 27 26 24 22 21 16 15 12 10 5 0
2871 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
2872 * | size | 1 1 1 | V | 0 0 | A R | 1 | Rs | o3 | opc | 0 0 | Rn | Rt |
2873 * +------+-------+---+-----+-----+---+----+----+-----+-----+----+-----+
2875 * Rt: the result register
2876 * Rn: base address or SP
2877 * Rs: the source register for the operation
2878 * V: vector flag (always 0 as of v8.3)
2882 static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
2883 int size, int rt, bool is_vector)
2885 int rs = extract32(insn, 16, 5);
2886 int rn = extract32(insn, 5, 5);
2887 int o3_opc = extract32(insn, 12, 4);
2888 int feature = ARM_FEATURE_V8_ATOMICS;
2889 TCGv_i64 tcg_rn, tcg_rs;
2890 AtomicThreeOpFn *fn;
2893 unallocated_encoding(s);
2897 case 000: /* LDADD */
2898 fn = tcg_gen_atomic_fetch_add_i64;
2900 case 001: /* LDCLR */
2901 fn = tcg_gen_atomic_fetch_and_i64;
2903 case 002: /* LDEOR */
2904 fn = tcg_gen_atomic_fetch_xor_i64;
2906 case 003: /* LDSET */
2907 fn = tcg_gen_atomic_fetch_or_i64;
2909 case 004: /* LDSMAX */
2910 fn = tcg_gen_atomic_fetch_smax_i64;
2912 case 005: /* LDSMIN */
2913 fn = tcg_gen_atomic_fetch_smin_i64;
2915 case 006: /* LDUMAX */
2916 fn = tcg_gen_atomic_fetch_umax_i64;
2918 case 007: /* LDUMIN */
2919 fn = tcg_gen_atomic_fetch_umin_i64;
2922 fn = tcg_gen_atomic_xchg_i64;
2925 unallocated_encoding(s);
2928 if (!arm_dc_feature(s, feature)) {
2929 unallocated_encoding(s);
2934 gen_check_sp_alignment(s);
2936 tcg_rn = cpu_reg_sp(s, rn);
2937 tcg_rs = read_cpu_reg(s, rs, true);
2939 if (o3_opc == 1) { /* LDCLR */
2940 tcg_gen_not_i64(tcg_rs, tcg_rs);
2943 /* The tcg atomic primitives are all full barriers. Therefore we
2944 * can ignore the Acquire and Release bits of this instruction.
2946 fn(cpu_reg(s, rt), tcg_rn, tcg_rs, get_mem_index(s),
2947 s->be_data | size | MO_ALIGN);
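    /* The fetch-and-op returns the old memory value in Rt; e.g.
     * "LDADD W2, W1, [X0]" atomically stores mem + W2 and leaves the
     * previous contents of [X0] in W1. LDCLR works because Rs was
     * complemented above, so the fetch_and computes mem & ~Rs.
     */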
2950 /* Load/store register (all forms) */
2951 static void disas_ldst_reg(DisasContext *s, uint32_t insn)
2953 int rt = extract32(insn, 0, 5);
2954 int opc = extract32(insn, 22, 2);
2955 bool is_vector = extract32(insn, 26, 1);
2956 int size = extract32(insn, 30, 2);
2958 switch (extract32(insn, 24, 2)) {
2960 if (extract32(insn, 21, 1) == 0) {
2961 /* Load/store register (unscaled immediate)
2962 * Load/store immediate pre/post-indexed
2963 * Load/store register unprivileged
2965 disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
2968 switch (extract32(insn, 10, 2)) {
2970 disas_ldst_atomic(s, insn, size, rt, is_vector);
2973 disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
2978 disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
2981 unallocated_encoding(s);
2984 /* AdvSIMD load/store multiple structures
2986 * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
2987 * +---+---+---------------+---+-------------+--------+------+------+------+
2988 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
2989 * +---+---+---------------+---+-------------+--------+------+------+------+
2991 * AdvSIMD load/store multiple structures (post-indexed)
2993 * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
2994 * +---+---+---------------+---+---+---------+--------+------+------+------+
2995 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
2996 * +---+---+---------------+---+---+---------+--------+------+------+------+
2998 * Rt: first (or only) SIMD&FP register to be transferred
2999 * Rn: base address or SP
3000 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3002 static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
3004 int rt = extract32(insn, 0, 5);
3005 int rn = extract32(insn, 5, 5);
3006 int size = extract32(insn, 10, 2);
3007 int opcode = extract32(insn, 12, 4);
3008 bool is_store = !extract32(insn, 22, 1);
3009 bool is_postidx = extract32(insn, 23, 1);
3010 bool is_q = extract32(insn, 30, 1);
3011 TCGv_i64 tcg_addr, tcg_rn;
3013 int ebytes = 1 << size;
3014 int elements = (is_q ? 128 : 64) / (8 << size);
3015 int rpt; /* num iterations */
3016 int selem; /* structure elements */
3019 if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
3020 unallocated_encoding(s);
3024 /* From the shared decode logic */
3055 unallocated_encoding(s);
3059 if (size == 3 && !is_q && selem != 1) {
3061 unallocated_encoding(s);
3065 if (!fp_access_check(s)) {
3070 gen_check_sp_alignment(s);
3073 tcg_rn = cpu_reg_sp(s, rn);
3074 tcg_addr = tcg_temp_new_i64();
3075 tcg_gen_mov_i64(tcg_addr, tcg_rn);
3077 for (r = 0; r < rpt; r++) {
3079 for (e = 0; e < elements; e++) {
3080 int tt = (rt + r) % 32;
3082 for (xs = 0; xs < selem; xs++) {
3084 do_vec_st(s, tt, e, tcg_addr, size);
3086 do_vec_ld(s, tt, e, tcg_addr, size);
3088 /* For non-quad operations, setting a slice of the low
3089 * 64 bits of the register clears the high 64 bits (in
3090 * the ARM ARM pseudocode this is implicit in the fact
3091 * that 'rval' is a 64 bit wide variable).
3092 * For quad operations, we might still need to zero the
3093 * high bits of the SVE register. We optimize by noticing that
3094 * we only need to do this the first time we touch a register.
3096 if (e == 0 && (r == 0 || xs == selem - 1)) {
3097 clear_vec_high(s, is_q, tt);
3100 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
3107 int rm = extract32(insn, 16, 5);
3109 tcg_gen_mov_i64(tcg_rn, tcg_addr);
3111 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3114 tcg_temp_free_i64(tcg_addr);
3117 /* AdvSIMD load/store single structure
3119 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3120 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3121 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
3122 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3124 * AdvSIMD load/store single structure (post-indexed)
3126 * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
3127 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3128 * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
3129 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
3131 * Rt: first (or only) SIMD&FP register to be transferred
3132 * Rn: base address or SP
3133 * Rm (post-index only): post-index register (when !31) or size dependent #imm
3134 * index = encoded in Q:S:size dependent on size
3136 * lane_size = encoded in R, opc
3137 * transfer width = encoded in opc, S, size
3139 static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
3141 int rt = extract32(insn, 0, 5);
3142 int rn = extract32(insn, 5, 5);
3143 int size = extract32(insn, 10, 2);
3144 int S = extract32(insn, 12, 1);
3145 int opc = extract32(insn, 13, 3);
3146 int R = extract32(insn, 21, 1);
3147 int is_load = extract32(insn, 22, 1);
3148 int is_postidx = extract32(insn, 23, 1);
3149 int is_q = extract32(insn, 30, 1);
3151 int scale = extract32(opc, 1, 2);
3152 int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
3153 bool replicate = false;
3154 int index = is_q << 3 | S << 2 | size;
3156 TCGv_i64 tcg_addr, tcg_rn;
3160 if (!is_load || S) {
3161 unallocated_encoding(s);
3170 if (extract32(size, 0, 1)) {
3171 unallocated_encoding(s);
3177 if (extract32(size, 1, 1)) {
3178 unallocated_encoding(s);
3181 if (!extract32(size, 0, 1)) {
3185 unallocated_encoding(s);
3193 g_assert_not_reached();
3196 if (!fp_access_check(s)) {
3200 ebytes = 1 << scale;
3203 gen_check_sp_alignment(s);
3206 tcg_rn = cpu_reg_sp(s, rn);
3207 tcg_addr = tcg_temp_new_i64();
3208 tcg_gen_mov_i64(tcg_addr, tcg_rn);
3210 for (xs = 0; xs < selem; xs++) {
3212 /* Load and replicate to all elements */
3214 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3216 tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
3217 get_mem_index(s), s->be_data + scale);
3220 mulconst = 0x0101010101010101ULL;
3223 mulconst = 0x0001000100010001ULL;
3226 mulconst = 0x0000000100000001ULL;
3232 g_assert_not_reached();
3235 tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
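    /* The multiply above broadcasts the loaded element into every lane of
     * the 64-bit temp; e.g. a byte 0xab times 0x0101010101010101 yields
     * 0xabababababababab.
     */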
3237 write_vec_element(s, tcg_tmp, rt, 0, MO_64);
3239 write_vec_element(s, tcg_tmp, rt, 1, MO_64);
3241 tcg_temp_free_i64(tcg_tmp);
3242 clear_vec_high(s, is_q, rt);
3244 /* Load/store one element per register */
3246 do_vec_ld(s, rt, index, tcg_addr, scale);
3248 do_vec_st(s, rt, index, tcg_addr, scale);
3251 tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
3256 int rm = extract32(insn, 16, 5);
3258 tcg_gen_mov_i64(tcg_rn, tcg_addr);
3260 tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
3263 tcg_temp_free_i64(tcg_addr);
3266 /* Loads and stores */
3267 static void disas_ldst(DisasContext *s, uint32_t insn)
3269 switch (extract32(insn, 24, 6)) {
3270 case 0x08: /* Load/store exclusive */
3271 disas_ldst_excl(s, insn);
3273 case 0x18: case 0x1c: /* Load register (literal) */
3274 disas_ld_lit(s, insn);
3276 case 0x28: case 0x29:
3277 case 0x2c: case 0x2d: /* Load/store pair (all forms) */
3278 disas_ldst_pair(s, insn);
3280 case 0x38: case 0x39:
3281 case 0x3c: case 0x3d: /* Load/store register (all forms) */
3282 disas_ldst_reg(s, insn);
3284 case 0x0c: /* AdvSIMD load/store multiple structures */
3285 disas_ldst_multiple_struct(s, insn);
3287 case 0x0d: /* AdvSIMD load/store single structure */
3288 disas_ldst_single_struct(s, insn);
3291 unallocated_encoding(s);
3296 /* PC-rel. addressing
3297 * 31 30 29 28 24 23 5 4 0
3298 * +----+-------+-----------+-------------------+------+
3299 * | op | immlo | 1 0 0 0 0 | immhi | Rd |
3300 * +----+-------+-----------+-------------------+------+
3302 static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
3304 unsigned int page, rd;
3308 page = extract32(insn, 31, 1);
3309 /* SignExtend(immhi:immlo) -> offset */
3310 offset = sextract64(insn, 5, 19);
3311 offset = offset << 2 | extract32(insn, 29, 2);
3312 rd = extract32(insn, 0, 5);
3316 /* ADRP (page based) */
3321 tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
3325 * Add/subtract (immediate)
3327 * 31 30 29 28 24 23 22 21 10 9 5 4 0
3328 * +--+--+--+-----------+-----+-------------+-----+-----+
3329 * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
3330 * +--+--+--+-----------+-----+-------------+-----+-----+
3332 * sf: 0 -> 32bit, 1 -> 64bit
3333 * op: 0 -> add, 1 -> sub
3335 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
3337 static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
3339 int rd = extract32(insn, 0, 5);
3340 int rn = extract32(insn, 5, 5);
3341 uint64_t imm = extract32(insn, 10, 12);
3342 int shift = extract32(insn, 22, 2);
3343 bool setflags = extract32(insn, 29, 1);
3344 bool sub_op = extract32(insn, 30, 1);
3345 bool is_64bit = extract32(insn, 31, 1);
3347 TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
3348 TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
3349 TCGv_i64 tcg_result;
3358 unallocated_encoding(s);
3362 tcg_result = tcg_temp_new_i64();
3365 tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
3367 tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
3370 TCGv_i64 tcg_imm = tcg_const_i64(imm);
3372 gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3374 gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
3376 tcg_temp_free_i64(tcg_imm);
3380 tcg_gen_mov_i64(tcg_rd, tcg_result);
3382 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3385 tcg_temp_free_i64(tcg_result);
3388 /* The input should be a value in the bottom e bits (with higher
3389 * bits zero); returns that value replicated into every element
3390 * of size e in a 64 bit integer.
3392 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
3402 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
3403 static inline uint64_t bitmask64(unsigned int length)
3405 assert(length > 0 && length <= 64);
3406 return ~0ULL >> (64 - length);
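    /* E.g. bitmask64(1) == 0x1, bitmask64(8) == 0xff and
     * bitmask64(64) == ~0ULL; the assertion keeps the shift count
     * 64 - length within the defined 0..63 range.
     */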
3409 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
3410 * only require the wmask. Returns false if the imms/immr/immn are a reserved
3411 * value (ie should cause a guest UNDEF exception), and true if they are
3412 * valid, in which case the decoded bit pattern is written to result.
3414 static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
3415 unsigned int imms, unsigned int immr)
3418 unsigned e, levels, s, r;
3421 assert(immn < 2 && imms < 64 && immr < 64);
3423 /* The bit patterns we create here are 64 bit patterns which
3424 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
3425 * 64 bits each. Each element contains the same value: a run
3426 * of between 1 and e-1 non-zero bits, rotated within the
3427 * element by between 0 and e-1 bits.
3429 * The element size and run length are encoded into immn (1 bit)
3430 * and imms (6 bits) as follows:
3431 * 64 bit elements: immn = 1, imms = <length of run - 1>
3432 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
3433 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
3434 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
3435 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
3436 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
3437 * Notice that immn = 0, imms = 11111x is the only combination
3438 * not covered by one of the above options; this is reserved.
3439 * Further, <length of run - 1> all-ones is a reserved pattern.
3441 * In all cases the rotation is by immr % e (and immr is 6 bits).
3444 /* First determine the element size */
3445 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
3447 /* This is the immn == 0, imms == 11111x case */
3457 /* <length of run - 1> mustn't be all-ones. */
3461 /* Create the value of one element: s+1 set bits rotated
3462 * by r within the element (which is e bits wide)...
3464 mask = bitmask64(s + 1);
3466 mask = (mask >> r) | (mask << (e - r));
3467 mask &= bitmask64(e);
3469 /* ...then replicate the element over the whole 64 bit value */
3470 mask = bitfield_replicate(mask, e);
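    /* Worked example: immn = 0, imms = 111100, immr = 0 selects e = 2 with
     * a run of s + 1 = 1 bit and no rotation, so each 2-bit element is 01
     * and the replicated wmask is 0x5555555555555555; with immr = 1 the
     * set bit rotates into bit 1, giving 0xaaaaaaaaaaaaaaaa.
     */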
3475 /* Logical (immediate)
3476 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3477 * +----+-----+-------------+---+------+------+------+------+
3478 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
3479 * +----+-----+-------------+---+------+------+------+------+
3481 static void disas_logic_imm(DisasContext *s, uint32_t insn)
3483 unsigned int sf, opc, is_n, immr, imms, rn, rd;
3484 TCGv_i64 tcg_rd, tcg_rn;
3486 bool is_and = false;
3488 sf = extract32(insn, 31, 1);
3489 opc = extract32(insn, 29, 2);
3490 is_n = extract32(insn, 22, 1);
3491 immr = extract32(insn, 16, 6);
3492 imms = extract32(insn, 10, 6);
3493 rn = extract32(insn, 5, 5);
3494 rd = extract32(insn, 0, 5);
3497 unallocated_encoding(s);
3501 if (opc == 0x3) { /* ANDS */
3502 tcg_rd = cpu_reg(s, rd);
3504 tcg_rd = cpu_reg_sp(s, rd);
3506 tcg_rn = cpu_reg(s, rn);
3508 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
3509 /* some immediate field values are reserved */
3510 unallocated_encoding(s);
3515 wmask &= 0xffffffff;
3519 case 0x3: /* ANDS */
3521 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
3525 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
3528 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
3531 assert(FALSE); /* must handle all above */
3535 if (!sf && !is_and) {
3536 /* zero extend final result; we know we can skip this for AND
3537 * since the immediate had the high 32 bits clear.
3539 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3542 if (opc == 3) { /* ANDS */
3543 gen_logic_CC(sf, tcg_rd);
3548 * Move wide (immediate)
3550 * 31 30 29 28 23 22 21 20 5 4 0
3551 * +--+-----+-------------+-----+----------------+------+
3552 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
3553 * +--+-----+-------------+-----+----------------+------+
3555 * sf: 0 -> 32 bit, 1 -> 64 bit
3556 * opc: 00 -> MOVN, 10 -> MOVZ, 11 -> MOVK (01 is unallocated)
3557 * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf=1)
3559 static void disas_movw_imm(DisasContext *s, uint32_t insn)
3561 int rd = extract32(insn, 0, 5);
3562 uint64_t imm = extract32(insn, 5, 16);
3563 int sf = extract32(insn, 31, 1);
3564 int opc = extract32(insn, 29, 2);
3565 int pos = extract32(insn, 21, 2) << 4;
3566 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3569 if (!sf && (pos >= 32)) {
3570 unallocated_encoding(s);
3584 tcg_gen_movi_i64(tcg_rd, imm);
3587 tcg_imm = tcg_const_i64(imm);
3588 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
3589 tcg_temp_free_i64(tcg_imm);
3591 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3595 unallocated_encoding(s);
3601 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
3602 * +----+-----+-------------+---+------+------+------+------+
3603 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
3604 * +----+-----+-------------+---+------+------+------+------+
3606 static void disas_bitfield(DisasContext *s, uint32_t insn)
3608 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
3609 TCGv_i64 tcg_rd, tcg_tmp;
3611 sf = extract32(insn, 31, 1);
3612 opc = extract32(insn, 29, 2);
3613 n = extract32(insn, 22, 1);
3614 ri = extract32(insn, 16, 6);
3615 si = extract32(insn, 10, 6);
3616 rn = extract32(insn, 5, 5);
3617 rd = extract32(insn, 0, 5);
3618 bitsize = sf ? 64 : 32;
3620 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
3621 unallocated_encoding(s);
3625 tcg_rd = cpu_reg(s, rd);
3627 /* Suppress the zero-extend for !sf. Since RI and SI are constrained
3628 to be smaller than bitsize, we'll never reference data outside the
3629 low 32-bits anyway. */
3630 tcg_tmp = read_cpu_reg(s, rn, 1);
3632 /* Recognize simple(r) extractions. */
3634 /* Wd<s-r:0> = Wn<s:r> */
3635 len = (si - ri) + 1;
3636 if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
3637 tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
3639 } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
3640 tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
3643 /* opc == 1, BFXIL falls through to deposit */
3644 tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
3647 /* Handle the ri > si case with a deposit
3648 * Wd<32+s-r,32-r> = Wn<s:0>
3651 pos = (bitsize - ri) & (bitsize - 1);
3654 if (opc == 0 && len < ri) {
3655 /* SBFM: sign extend the destination field from len to fill
3656 the balance of the word. Let the deposit below insert all
3657 of those sign bits. */
3658 tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
3662 if (opc == 1) { /* BFM, BFXIL */
3663 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
3665 /* SBFM or UBFM: We start with zero, and we haven't modified
3666 any bits outside bitsize, therefore the zero-extension
3667 below is unneeded. */
3668 tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
3673 if (!sf) { /* zero extend final result */
3674 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
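    /* Illustrative decode: UBFIZ W0, W1, #8, #4 is UBFM with immr = 24 and
     * imms = 3, so si < ri and the deposit path runs with
     * pos = (32 - 24) & 31 = 8 and len = 4, depositing W1<3:0> at
     * W0<11:8> over zeroes.
     */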
3679 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
3680 * +----+------+-------------+---+----+------+--------+------+------+
3681 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
3682 * +----+------+-------------+---+----+------+--------+------+------+
3684 static void disas_extract(DisasContext *s, uint32_t insn)
3686 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
3688 sf = extract32(insn, 31, 1);
3689 n = extract32(insn, 22, 1);
3690 rm = extract32(insn, 16, 5);
3691 imm = extract32(insn, 10, 6);
3692 rn = extract32(insn, 5, 5);
3693 rd = extract32(insn, 0, 5);
3694 op21 = extract32(insn, 29, 2);
3695 op0 = extract32(insn, 21, 1);
3696 bitsize = sf ? 64 : 32;
3698 if (sf != n || op21 || op0 || imm >= bitsize) {
3699 unallocated_encoding(s);
3701 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
3703 tcg_rd = cpu_reg(s, rd);
3705 if (unlikely(imm == 0)) {
3706 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
3707 * so an extract from bit 0 is a special case.
3710 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
3712 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
3714 } else if (rm == rn) { /* ROR */
3715 tcg_rm = cpu_reg(s, rm);
3717 tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
3719 TCGv_i32 tmp = tcg_temp_new_i32();
3720 tcg_gen_extrl_i64_i32(tmp, tcg_rm);
3721 tcg_gen_rotri_i32(tmp, tmp, imm);
3722 tcg_gen_extu_i32_i64(tcg_rd, tmp);
3723 tcg_temp_free_i32(tmp);
3726 tcg_rm = read_cpu_reg(s, rm, sf);
3727 tcg_rn = read_cpu_reg(s, rn, sf);
3728 tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
3729 tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
3730 tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
3732 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
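    /* Worked example: EXTR X0, X1, X2, #8 computes (X1:X2) >> 8 truncated
     * to 64 bits, i.e. X0 = (X1 << 56) | (X2 >> 8); the Rn == Rm case
     * above degenerates to a rotate right by imm.
     */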
3738 /* Data processing - immediate */
3739 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
3741 switch (extract32(insn, 23, 6)) {
3742 case 0x20: case 0x21: /* PC-rel. addressing */
3743 disas_pc_rel_adr(s, insn);
3745 case 0x22: case 0x23: /* Add/subtract (immediate) */
3746 disas_add_sub_imm(s, insn);
3748 case 0x24: /* Logical (immediate) */
3749 disas_logic_imm(s, insn);
3751 case 0x25: /* Move wide (immediate) */
3752 disas_movw_imm(s, insn);
3754 case 0x26: /* Bitfield */
3755 disas_bitfield(s, insn);
3757 case 0x27: /* Extract */
3758 disas_extract(s, insn);
3761 unallocated_encoding(s);
3766 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
3767 * Note that it is the caller's responsibility to ensure that the
3768 * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
3769 * mandated semantics for out of range shifts.
3771 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
3772 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
3774 switch (shift_type) {
3775 case A64_SHIFT_TYPE_LSL:
3776 tcg_gen_shl_i64(dst, src, shift_amount);
3778 case A64_SHIFT_TYPE_LSR:
3779 tcg_gen_shr_i64(dst, src, shift_amount);
3781 case A64_SHIFT_TYPE_ASR:
3783 tcg_gen_ext32s_i64(dst, src);
3785 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
3787 case A64_SHIFT_TYPE_ROR:
3789 tcg_gen_rotr_i64(dst, src, shift_amount);
3792 t0 = tcg_temp_new_i32();
3793 t1 = tcg_temp_new_i32();
3794 tcg_gen_extrl_i64_i32(t0, src);
3795 tcg_gen_extrl_i64_i32(t1, shift_amount);
3796 tcg_gen_rotr_i32(t0, t0, t1);
3797 tcg_gen_extu_i32_i64(dst, t0);
3798 tcg_temp_free_i32(t0);
3799 tcg_temp_free_i32(t1);
3803 assert(FALSE); /* all shift types should be handled */
3807 if (!sf) { /* zero extend final result */
3808 tcg_gen_ext32u_i64(dst, dst);
3812 /* Shift a TCGv src by immediate, put result in dst.
3813 * The shift amount must be in range (this should always be true as the
3814 * relevant instructions will UNDEF on bad shift immediates).
3816 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
3817 enum a64_shift_type shift_type, unsigned int shift_i)
3819 assert(shift_i < (sf ? 64 : 32));
3822 tcg_gen_mov_i64(dst, src);
3824 TCGv_i64 shift_const;
3826 shift_const = tcg_const_i64(shift_i);
3827 shift_reg(dst, src, sf, shift_type, shift_const);
3828 tcg_temp_free_i64(shift_const);
3832 /* Logical (shifted register)
3833 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3834 * +----+-----+-----------+-------+---+------+--------+------+------+
3835 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
3836 * +----+-----+-----------+-------+---+------+--------+------+------+
3838 static void disas_logic_reg(DisasContext *s, uint32_t insn)
3840 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
3841 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
3843 sf = extract32(insn, 31, 1);
3844 opc = extract32(insn, 29, 2);
3845 shift_type = extract32(insn, 22, 2);
3846 invert = extract32(insn, 21, 1);
3847 rm = extract32(insn, 16, 5);
3848 shift_amount = extract32(insn, 10, 6);
3849 rn = extract32(insn, 5, 5);
3850 rd = extract32(insn, 0, 5);
3852 if (!sf && (shift_amount & (1 << 5))) {
3853 unallocated_encoding(s);
3857 tcg_rd = cpu_reg(s, rd);
3859 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
3860 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
3861 * register-register MOV and MVN, so it is worth special casing.
3863 tcg_rm = cpu_reg(s, rm);
3865 tcg_gen_not_i64(tcg_rd, tcg_rm);
3867 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3871 tcg_gen_mov_i64(tcg_rd, tcg_rm);
3873 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
3879 tcg_rm = read_cpu_reg(s, rm, sf);
3882 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
3885 tcg_rn = cpu_reg(s, rn);
3887 switch (opc | (invert << 2)) {
3890 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
3893 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
3896 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
3900 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
3903 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
3906 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
3914 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3918 gen_logic_CC(sf, tcg_rd);
3923 * Add/subtract (extended register)
3925 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
3926 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3927 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
3928 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
3930 * sf: 0 -> 32bit, 1 -> 64bit
3931 * op: 0 -> add, 1 -> sub
3934 * option: extension type (see DecodeRegExtend)
3935 * imm3: optional shift to Rm
3937 * Rd = Rn + LSL(extend(Rm), amount)
3939 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
3941 int rd = extract32(insn, 0, 5);
3942 int rn = extract32(insn, 5, 5);
3943 int imm3 = extract32(insn, 10, 3);
3944 int option = extract32(insn, 13, 3);
3945 int rm = extract32(insn, 16, 5);
3946 bool setflags = extract32(insn, 29, 1);
3947 bool sub_op = extract32(insn, 30, 1);
3948 bool sf = extract32(insn, 31, 1);
3950 TCGv_i64 tcg_rm, tcg_rn; /* temps */
3952 TCGv_i64 tcg_result;
3955 unallocated_encoding(s);
3959 /* non-flag setting ops may use SP */
3961 tcg_rd = cpu_reg_sp(s, rd);
3963 tcg_rd = cpu_reg(s, rd);
3965 tcg_rn = read_cpu_reg_sp(s, rn, sf);
3967 tcg_rm = read_cpu_reg(s, rm, sf);
3968 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
3970 tcg_result = tcg_temp_new_i64();
3974 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3976 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3980 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3982 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3987 tcg_gen_mov_i64(tcg_rd, tcg_result);
3989 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3992 tcg_temp_free_i64(tcg_result);
3996 * Add/subtract (shifted register)
3998 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3999 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4000 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
4001 * +--+--+--+-----------+-----+--+-------+---------+------+------+
4003 * sf: 0 -> 32bit, 1 -> 64bit
4004 * op: 0 -> add, 1 -> sub
4006 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
4007 * imm6: Shift amount to apply to Rm before the add/sub
4009 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
4011 int rd = extract32(insn, 0, 5);
4012 int rn = extract32(insn, 5, 5);
4013 int imm6 = extract32(insn, 10, 6);
4014 int rm = extract32(insn, 16, 5);
4015 int shift_type = extract32(insn, 22, 2);
4016 bool setflags = extract32(insn, 29, 1);
4017 bool sub_op = extract32(insn, 30, 1);
4018 bool sf = extract32(insn, 31, 1);
4020 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4021 TCGv_i64 tcg_rn, tcg_rm;
4022 TCGv_i64 tcg_result;
4024 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
4025 unallocated_encoding(s);
4029 tcg_rn = read_cpu_reg(s, rn, sf);
4030 tcg_rm = read_cpu_reg(s, rm, sf);
4032 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
4034 tcg_result = tcg_temp_new_i64();
4038 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
4040 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
4044 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
4046 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
4051 tcg_gen_mov_i64(tcg_rd, tcg_result);
4053 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
4056 tcg_temp_free_i64(tcg_result);
4059 /* Data-processing (3 source)
4061 * 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
4062 * +--+------+-----------+------+------+----+------+------+------+
4063 * |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
4064 * +--+------+-----------+------+------+----+------+------+------+
4066 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
4068 int rd = extract32(insn, 0, 5);
4069 int rn = extract32(insn, 5, 5);
4070 int ra = extract32(insn, 10, 5);
4071 int rm = extract32(insn, 16, 5);
4072 int op_id = (extract32(insn, 29, 3) << 4) |
4073 (extract32(insn, 21, 3) << 1) |
4074 extract32(insn, 15, 1);
4075 bool sf = extract32(insn, 31, 1);
4076 bool is_sub = extract32(op_id, 0, 1);
4077 bool is_high = extract32(op_id, 2, 1);
4078 bool is_signed = false;
4083 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
4085 case 0x42: /* SMADDL */
4086 case 0x43: /* SMSUBL */
4087 case 0x44: /* SMULH */
4090 case 0x0: /* MADD (32bit) */
4091 case 0x1: /* MSUB (32bit) */
4092 case 0x40: /* MADD (64bit) */
4093 case 0x41: /* MSUB (64bit) */
4094 case 0x4a: /* UMADDL */
4095 case 0x4b: /* UMSUBL */
4096 case 0x4c: /* UMULH */
4099 unallocated_encoding(s);
4104 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
4105 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4106 TCGv_i64 tcg_rn = cpu_reg(s, rn);
4107 TCGv_i64 tcg_rm = cpu_reg(s, rm);
4110 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4112 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
4115 tcg_temp_free_i64(low_bits);
4119 tcg_op1 = tcg_temp_new_i64();
4120 tcg_op2 = tcg_temp_new_i64();
4121 tcg_tmp = tcg_temp_new_i64();
4124 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
4125 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
4128 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
4129 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
4131 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
4132 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
4136 if (ra == 31 && !is_sub) {
4137 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
4138 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
4140 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
4142 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4144 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
4149 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
4152 tcg_temp_free_i64(tcg_op1);
4153 tcg_temp_free_i64(tcg_op2);
4154 tcg_temp_free_i64(tcg_tmp);
4157 /* Add/subtract (with carry)
4158 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
4159 * +--+--+--+------------------------+------+---------+------+-----+
4160 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
4161 * +--+--+--+------------------------+------+---------+------+-----+
4165 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
4167 unsigned int sf, op, setflags, rm, rn, rd;
4168 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
4170 if (extract32(insn, 10, 6) != 0) {
4171 unallocated_encoding(s);
4175 sf = extract32(insn, 31, 1);
4176 op = extract32(insn, 30, 1);
4177 setflags = extract32(insn, 29, 1);
4178 rm = extract32(insn, 16, 5);
4179 rn = extract32(insn, 5, 5);
4180 rd = extract32(insn, 0, 5);
4182 tcg_rd = cpu_reg(s, rd);
4183 tcg_rn = cpu_reg(s, rn);
4186 tcg_y = new_tmp_a64(s);
4187 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
4189 tcg_y = cpu_reg(s, rm);
4193 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
4195 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
4199 /* Conditional compare (immediate / register)
4200 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4201 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4202 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
4203 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
4206 static void disas_cc(DisasContext *s, uint32_t insn)
4208 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
4209 TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
4210 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
4213 if (!extract32(insn, 29, 1)) {
4214 unallocated_encoding(s);
4217 if (insn & (1 << 10 | 1 << 4)) {
4218 unallocated_encoding(s);
4221 sf = extract32(insn, 31, 1);
4222 op = extract32(insn, 30, 1);
4223 is_imm = extract32(insn, 11, 1);
4224 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
4225 cond = extract32(insn, 12, 4);
4226 rn = extract32(insn, 5, 5);
4227 nzcv = extract32(insn, 0, 4);
4229 /* Set T0 = !COND. */
4230 tcg_t0 = tcg_temp_new_i32();
4231 arm_test_cc(&c, cond);
4232 tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
4235 /* Load the arguments for the new comparison. */
4237 tcg_y = new_tmp_a64(s);
4238 tcg_gen_movi_i64(tcg_y, y);
4240 tcg_y = cpu_reg(s, y);
4242 tcg_rn = cpu_reg(s, rn);
4244 /* Set the flags for the new comparison. */
4245 tcg_tmp = tcg_temp_new_i64();
4247 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4249 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
4251 tcg_temp_free_i64(tcg_tmp);
4253 /* If COND was false, force the flags to #nzcv. Compute two masks
4254 * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
4255 * For tcg hosts that support ANDC, we can make do with just T1.
4256 * In either case, allow the tcg optimizer to delete any unused mask.
4258 tcg_t1 = tcg_temp_new_i32();
4259 tcg_t2 = tcg_temp_new_i32();
4260 tcg_gen_neg_i32(tcg_t1, tcg_t0);
4261 tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
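    /* T0 = !COND is 0 or 1, so T1 = -T0 and T2 = T0 - 1 are complementary
     * all-zeroes/all-ones masks: COND true gives T1 = 0, T2 = -1 and COND
     * false gives T1 = -1, T2 = 0, as described above.
     */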
4263 if (nzcv & 8) { /* N */
4264 tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
4266 if (TCG_TARGET_HAS_andc_i32) {
4267 tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
4269 tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
4272 if (nzcv & 4) { /* Z */
4273 if (TCG_TARGET_HAS_andc_i32) {
4274 tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
4276 tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
4279 tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
4281 if (nzcv & 2) { /* C */
4282 tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
4284 if (TCG_TARGET_HAS_andc_i32) {
4285 tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
4287 tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
4290 if (nzcv & 1) { /* V */
4291 tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
4293 if (TCG_TARGET_HAS_andc_i32) {
4294 tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
4296 tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
4299 tcg_temp_free_i32(tcg_t0);
4300 tcg_temp_free_i32(tcg_t1);
4301 tcg_temp_free_i32(tcg_t2);
4304 /* Conditional select
4305 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
4306 * +----+----+---+-----------------+------+------+-----+------+------+
4307 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
4308 * +----+----+---+-----------------+------+------+-----+------+------+
4310 static void disas_cond_select(DisasContext *s, uint32_t insn)
4312 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
4313 TCGv_i64 tcg_rd, zero;
4316 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
4317 /* S == 1 or op2<1> == 1 */
4318 unallocated_encoding(s);
4321 sf = extract32(insn, 31, 1);
4322 else_inv = extract32(insn, 30, 1);
4323 rm = extract32(insn, 16, 5);
4324 cond = extract32(insn, 12, 4);
4325 else_inc = extract32(insn, 10, 1);
4326 rn = extract32(insn, 5, 5);
4327 rd = extract32(insn, 0, 5);
4329 tcg_rd = cpu_reg(s, rd);
4331 a64_test_cc(&c, cond);
4332 zero = tcg_const_i64(0);
4334 if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
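        /* CSINC/CSINV with Rn = Rm = ZR are the CSET/CSETM aliases: the
         * inverted setcond yields the 0/1 result directly, negated below
         * to 0/-1 for CSETM.
         */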
4336 tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
4338 tcg_gen_neg_i64(tcg_rd, tcg_rd);
4341 TCGv_i64 t_true = cpu_reg(s, rn);
4342 TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
4343 if (else_inv && else_inc) {
4344 tcg_gen_neg_i64(t_false, t_false);
4345 } else if (else_inv) {
4346 tcg_gen_not_i64(t_false, t_false);
4347 } else if (else_inc) {
4348 tcg_gen_addi_i64(t_false, t_false, 1);
4350 tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
4353 tcg_temp_free_i64(zero);
4357 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4361 static void handle_clz(DisasContext *s, unsigned int sf,
4362 unsigned int rn, unsigned int rd)
4364 TCGv_i64 tcg_rd, tcg_rn;
4365 tcg_rd = cpu_reg(s, rd);
4366 tcg_rn = cpu_reg(s, rn);
4369 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
4371 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4372 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4373 tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
4374 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4375 tcg_temp_free_i32(tcg_tmp32);
4379 static void handle_cls(DisasContext *s, unsigned int sf,
4380 unsigned int rn, unsigned int rd)
4382 TCGv_i64 tcg_rd, tcg_rn;
4383 tcg_rd = cpu_reg(s, rd);
4384 tcg_rn = cpu_reg(s, rn);
4387 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
4389 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4390 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4391 tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
4392 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4393 tcg_temp_free_i32(tcg_tmp32);
4397 static void handle_rbit(DisasContext *s, unsigned int sf,
4398 unsigned int rn, unsigned int rd)
4400 TCGv_i64 tcg_rd, tcg_rn;
4401 tcg_rd = cpu_reg(s, rd);
4402 tcg_rn = cpu_reg(s, rn);
4405 gen_helper_rbit64(tcg_rd, tcg_rn);
4407 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
4408 tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
4409 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
4410 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
4411 tcg_temp_free_i32(tcg_tmp32);
4415 /* REV with sf==1, opcode==3 ("REV64") */
4416 static void handle_rev64(DisasContext *s, unsigned int sf,
4417 unsigned int rn, unsigned int rd)
4420 unallocated_encoding(s);
4423 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
4426 /* REV with sf==0, opcode==2
4427 * REV32 (sf==1, opcode==2)
4429 static void handle_rev32(DisasContext *s, unsigned int sf,
4430 unsigned int rn, unsigned int rd)
4432 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4435 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4436 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4438 /* bswap32_i64 requires zero high word */
4439 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
4440 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
4441 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
4442 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
4443 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
4445 tcg_temp_free_i64(tcg_tmp);
4447 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
4448 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
4452 /* REV16 (opcode==1) */
4453 static void handle_rev16(DisasContext *s, unsigned int sf,
4454 unsigned int rn, unsigned int rd)
4456 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4457 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4458 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4459 TCGv_i64 mask = tcg_const_i64(sf ? 0x00ff00ff00ff00ffull : 0x00ff00ff);
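    /* Swap the bytes within each 16-bit lane:
     * rd = ((rn & mask) << 8) | ((rn >> 8) & mask),
     * e.g. 0x1122334455667788 -> 0x2211443366558877 for sf = 1.
     */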
4461 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 8);
4462 tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
4463 tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
4464 tcg_gen_shli_i64(tcg_rd, tcg_rd, 8);
4465 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_tmp);
4467 tcg_temp_free_i64(mask);
4468 tcg_temp_free_i64(tcg_tmp);
4471 /* Data-processing (1 source)
4472 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4473 * +----+---+---+-----------------+---------+--------+------+------+
4474 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
4475 * +----+---+---+-----------------+---------+--------+------+------+
4477 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
4479 unsigned int sf, opcode, rn, rd;
4481 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
4482 unallocated_encoding(s);
4486 sf = extract32(insn, 31, 1);
4487 opcode = extract32(insn, 10, 6);
4488 rn = extract32(insn, 5, 5);
4489 rd = extract32(insn, 0, 5);
4493 handle_rbit(s, sf, rn, rd);
4496 handle_rev16(s, sf, rn, rd);
4499 handle_rev32(s, sf, rn, rd);
4502 handle_rev64(s, sf, rn, rd);
4505 handle_clz(s, sf, rn, rd);
4508 handle_cls(s, sf, rn, rd);
4513 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
4514 unsigned int rm, unsigned int rn, unsigned int rd)
4516 TCGv_i64 tcg_n, tcg_m, tcg_rd;
4517 tcg_rd = cpu_reg(s, rd);
4519 if (!sf && is_signed) {
4520 tcg_n = new_tmp_a64(s);
4521 tcg_m = new_tmp_a64(s);
4522 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
4523 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
4525 tcg_n = read_cpu_reg(s, rn, sf);
4526 tcg_m = read_cpu_reg(s, rm, sf);
4530 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
4532 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
4535 if (!sf) { /* zero extend final result */
4536 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
4540 /* LSLV, LSRV, ASRV, RORV */
4541 static void handle_shift_reg(DisasContext *s,
4542 enum a64_shift_type shift_type, unsigned int sf,
4543 unsigned int rm, unsigned int rn, unsigned int rd)
4545 TCGv_i64 tcg_shift = tcg_temp_new_i64();
4546 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4547 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
4549 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
4550 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
4551 tcg_temp_free_i64(tcg_shift);
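    /* The AND above gives the architectural MOD-datasize semantics for
     * variable shifts; e.g. LSLV with a shift register holding 65 shifts a
     * 64-bit Rn by 1, so shift_reg only ever sees an in-range amount.
     */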
4554 /* CRC32[BHWX], CRC32C[BHWX] */
4555 static void handle_crc32(DisasContext *s,
4556 unsigned int sf, unsigned int sz, bool crc32c,
4557 unsigned int rm, unsigned int rn, unsigned int rd)
4559 TCGv_i64 tcg_acc, tcg_val;
4562 if (!arm_dc_feature(s, ARM_FEATURE_CRC)
4563 || (sf == 1 && sz != 3)
4564 || (sf == 0 && sz == 3)) {
4565 unallocated_encoding(s);
4570 tcg_val = cpu_reg(s, rm);
4584 g_assert_not_reached();
4586 tcg_val = new_tmp_a64(s);
4587 tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
4590 tcg_acc = cpu_reg(s, rn);
4591 tcg_bytes = tcg_const_i32(1 << sz);
4594 gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4596 gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
4599 tcg_temp_free_i32(tcg_bytes);
4602 /* Data-processing (2 source)
4603 * 31 30 29 28 21 20 16 15 10 9 5 4 0
4604 * +----+---+---+-----------------+------+--------+------+------+
4605 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
4606 * +----+---+---+-----------------+------+--------+------+------+
4608 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
4610 unsigned int sf, rm, opcode, rn, rd;
4611 sf = extract32(insn, 31, 1);
4612 rm = extract32(insn, 16, 5);
4613 opcode = extract32(insn, 10, 6);
4614 rn = extract32(insn, 5, 5);
4615 rd = extract32(insn, 0, 5);
4617 if (extract32(insn, 29, 1)) {
4618 unallocated_encoding(s);
4624 handle_div(s, false, sf, rm, rn, rd);
4627 handle_div(s, true, sf, rm, rn, rd);
4630 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
4633 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
4636 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
4639 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
4648 case 23: /* CRC32 */
4650 int sz = extract32(opcode, 0, 2);
4651 bool crc32c = extract32(opcode, 2, 1);
4652 handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
4656 unallocated_encoding(s);
4661 /* Data processing - register */
4662 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
4664 switch (extract32(insn, 24, 5)) {
4665 case 0x0a: /* Logical (shifted register) */
4666 disas_logic_reg(s, insn);
4668 case 0x0b: /* Add/subtract */
4669 if (insn & (1 << 21)) { /* (extended register) */
4670 disas_add_sub_ext_reg(s, insn);
4672 disas_add_sub_reg(s, insn);
4675 case 0x1b: /* Data-processing (3 source) */
4676 disas_data_proc_3src(s, insn);
4679 switch (extract32(insn, 21, 3)) {
4680 case 0x0: /* Add/subtract (with carry) */
4681 disas_adc_sbc(s, insn);
4683 case 0x2: /* Conditional compare */
4684 disas_cc(s, insn); /* both imm and reg forms */
4686 case 0x4: /* Conditional select */
4687 disas_cond_select(s, insn);
4689 case 0x6: /* Data-processing */
4690 if (insn & (1 << 30)) { /* (1 source) */
4691 disas_data_proc_1src(s, insn);
4692 } else { /* (2 source) */
4693 disas_data_proc_2src(s, insn);
4697 unallocated_encoding(s);
4702 unallocated_encoding(s);
4707 static void handle_fp_compare(DisasContext *s, bool is_double,
4708 unsigned int rn, unsigned int rm,
4709 bool cmp_with_zero, bool signal_all_nans)
4711 TCGv_i64 tcg_flags = tcg_temp_new_i64();
4712 TCGv_ptr fpst = get_fpstatus_ptr(false);
4715 TCGv_i64 tcg_vn, tcg_vm;
4717 tcg_vn = read_fp_dreg(s, rn);
4718 if (cmp_with_zero) {
4719 tcg_vm = tcg_const_i64(0);
4721 tcg_vm = read_fp_dreg(s, rm);
4723 if (signal_all_nans) {
4724 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4726 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4728 tcg_temp_free_i64(tcg_vn);
4729 tcg_temp_free_i64(tcg_vm);
4731 TCGv_i32 tcg_vn, tcg_vm;
4733 tcg_vn = read_fp_sreg(s, rn);
4734 if (cmp_with_zero) {
4735 tcg_vm = tcg_const_i32(0);
4737 tcg_vm = read_fp_sreg(s, rm);
4739 if (signal_all_nans) {
4740 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4742 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
4744 tcg_temp_free_i32(tcg_vn);
4745 tcg_temp_free_i32(tcg_vm);
4748 tcg_temp_free_ptr(fpst);
4750 gen_set_nzcv(tcg_flags);
4752 tcg_temp_free_i64(tcg_flags);
4755 /* Floating point compare
4756 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
4757 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4758 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
4759 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
4761 static void disas_fp_compare(DisasContext *s, uint32_t insn)
4763 unsigned int mos, type, rm, op, rn, opc, op2r;
4765 mos = extract32(insn, 29, 3);
4766 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4767 rm = extract32(insn, 16, 5);
4768 op = extract32(insn, 14, 2);
4769 rn = extract32(insn, 5, 5);
4770 opc = extract32(insn, 3, 2);
4771 op2r = extract32(insn, 0, 3);
4773 if (mos || op || op2r || type > 1) {
4774 unallocated_encoding(s);
4778 if (!fp_access_check(s)) {
4782 handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
4785 /* Floating point conditional compare
4786 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
4787 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4788 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
4789 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
4791 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
4793 unsigned int mos, type, rm, cond, rn, op, nzcv;
4795 TCGLabel *label_continue = NULL;
4797 mos = extract32(insn, 29, 3);
4798 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4799 rm = extract32(insn, 16, 5);
4800 cond = extract32(insn, 12, 4);
4801 rn = extract32(insn, 5, 5);
4802 op = extract32(insn, 4, 1);
4803 nzcv = extract32(insn, 0, 4);
4805 if (mos || type > 1) {
4806 unallocated_encoding(s);
4810 if (!fp_access_check(s)) {
4814 if (cond < 0x0e) { /* not always */
4815 TCGLabel *label_match = gen_new_label();
4816 label_continue = gen_new_label();
4817 arm_gen_test_cc(cond, label_match);
4819 tcg_flags = tcg_const_i64(nzcv << 28);
4820 gen_set_nzcv(tcg_flags);
4821 tcg_temp_free_i64(tcg_flags);
4822 tcg_gen_br(label_continue);
4823 gen_set_label(label_match);
4826 handle_fp_compare(s, type, rn, rm, false, op);
4829 gen_set_label(label_continue);
4833 /* Floating point conditional select
4834 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
4835 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4836 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
4837 * +---+---+---+-----------+------+---+------+------+-----+------+------+
4839 static void disas_fp_csel(DisasContext *s, uint32_t insn)
4841 unsigned int mos, type, rm, cond, rn, rd;
4842 TCGv_i64 t_true, t_false, t_zero;
4845 mos = extract32(insn, 29, 3);
4846 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
4847 rm = extract32(insn, 16, 5);
4848 cond = extract32(insn, 12, 4);
4849 rn = extract32(insn, 5, 5);
4850 rd = extract32(insn, 0, 5);
4852 if (mos || type > 1) {
4853 unallocated_encoding(s);
4857 if (!fp_access_check(s)) {
4861 /* Zero extend sreg inputs to 64 bits now. */
4862 t_true = tcg_temp_new_i64();
4863 t_false = tcg_temp_new_i64();
4864 read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
4865 read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
4867 a64_test_cc(&c, cond);
4868 t_zero = tcg_const_i64(0);
4869 tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
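/* That is: t_true = (c.value <c.cond> 0) ? t_true : t_false. */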
4870 tcg_temp_free_i64(t_zero);
4871 tcg_temp_free_i64(t_false);
4874 /* Note that sregs write back zeros to the high bits,
4875 and we've already done the zero-extension. */
4876 write_fp_dreg(s, rd, t_true);
4877 tcg_temp_free_i64(t_true);
4880 /* Floating-point data-processing (1 source) - half precision */
4881 static void handle_fp_1src_half(DisasContext *s, int opcode, int rd, int rn)
4883 TCGv_ptr fpst = NULL;
4884 TCGv_i32 tcg_op = tcg_temp_new_i32();
4885 TCGv_i32 tcg_res = tcg_temp_new_i32();
4887 read_vec_element_i32(s, tcg_op, rn, 0, MO_16);
4890 case 0x0: /* FMOV */
4891 tcg_gen_mov_i32(tcg_res, tcg_op);
4893 case 0x1: /* FABS */
4894 tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
4896 case 0x2: /* FNEG */
4897 tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
4899 case 0x3: /* FSQRT */
4900 gen_helper_sqrt_f16(tcg_res, tcg_op, cpu_env);
4902 case 0x8: /* FRINTN */
4903 case 0x9: /* FRINTP */
4904 case 0xa: /* FRINTM */
4905 case 0xb: /* FRINTZ */
4906 case 0xc: /* FRINTA */
4908 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4909 fpst = get_fpstatus_ptr(true);
4911 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4912 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
4914 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4915 tcg_temp_free_i32(tcg_rmode);
4918 case 0xe: /* FRINTX */
4919 fpst = get_fpstatus_ptr(true);
4920 gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, fpst);
4922 case 0xf: /* FRINTI */
4923 fpst = get_fpstatus_ptr(true);
4924 gen_helper_advsimd_rinth(tcg_res, tcg_op, fpst);
4930 write_fp_sreg(s, rd, tcg_res);
4933 tcg_temp_free_ptr(fpst);
4935 tcg_temp_free_i32(tcg_op);
4936 tcg_temp_free_i32(tcg_res);
4939 /* Floating-point data-processing (1 source) - single precision */
4940 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
4946 fpst = get_fpstatus_ptr(false);
4947 tcg_op = read_fp_sreg(s, rn);
4948 tcg_res = tcg_temp_new_i32();
4951 case 0x0: /* FMOV */
4952 tcg_gen_mov_i32(tcg_res, tcg_op);
4954 case 0x1: /* FABS */
4955 gen_helper_vfp_abss(tcg_res, tcg_op);
4957 case 0x2: /* FNEG */
4958 gen_helper_vfp_negs(tcg_res, tcg_op);
4960 case 0x3: /* FSQRT */
4961 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
4963 case 0x8: /* FRINTN */
4964 case 0x9: /* FRINTP */
4965 case 0xa: /* FRINTM */
4966 case 0xb: /* FRINTZ */
4967 case 0xc: /* FRINTA */
4969 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
4971 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4972 gen_helper_rints(tcg_res, tcg_op, fpst);
4974 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
4975 tcg_temp_free_i32(tcg_rmode);
4978 case 0xe: /* FRINTX */
4979 gen_helper_rints_exact(tcg_res, tcg_op, fpst);
4981 case 0xf: /* FRINTI */
4982 gen_helper_rints(tcg_res, tcg_op, fpst);
4988 write_fp_sreg(s, rd, tcg_res);
4990 tcg_temp_free_ptr(fpst);
4991 tcg_temp_free_i32(tcg_op);
4992 tcg_temp_free_i32(tcg_res);
4995 /* Floating-point data-processing (1 source) - double precision */
4996 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
5003 case 0x0: /* FMOV */
5004 gen_gvec_fn2(s, false, rd, rn, tcg_gen_gvec_mov, 0);
5008 fpst = get_fpstatus_ptr(false);
5009 tcg_op = read_fp_dreg(s, rn);
5010 tcg_res = tcg_temp_new_i64();
5013 case 0x1: /* FABS */
5014 gen_helper_vfp_absd(tcg_res, tcg_op);
5016 case 0x2: /* FNEG */
5017 gen_helper_vfp_negd(tcg_res, tcg_op);
5019 case 0x3: /* FSQRT */
5020 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
5022 case 0x8: /* FRINTN */
5023 case 0x9: /* FRINTP */
5024 case 0xa: /* FRINTM */
5025 case 0xb: /* FRINTZ */
5026 case 0xc: /* FRINTA */
5028 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
5030 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5031 gen_helper_rintd(tcg_res, tcg_op, fpst);
5033 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
5034 tcg_temp_free_i32(tcg_rmode);
5037 case 0xe: /* FRINTX */
5038 gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
5040 case 0xf: /* FRINTI */
5041 gen_helper_rintd(tcg_res, tcg_op, fpst);
5047 write_fp_dreg(s, rd, tcg_res);
5049 tcg_temp_free_ptr(fpst);
5050 tcg_temp_free_i64(tcg_op);
5051 tcg_temp_free_i64(tcg_res);
5054 static void handle_fp_fcvt(DisasContext *s, int opcode,
5055 int rd, int rn, int dtype, int ntype)
5060 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5062 /* Single to double */
5063 TCGv_i64 tcg_rd = tcg_temp_new_i64();
5064 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
5065 write_fp_dreg(s, rd, tcg_rd);
5066 tcg_temp_free_i64(tcg_rd);
5068 /* Single to half */
5069 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5070 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
5071 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5072 write_fp_sreg(s, rd, tcg_rd);
5073 tcg_temp_free_i32(tcg_rd);
5075 tcg_temp_free_i32(tcg_rn);
5080 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
5081 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5083 /* Double to single */
5084 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
5086 /* Double to half */
5087 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
5088 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
5090 write_fp_sreg(s, rd, tcg_rd);
5091 tcg_temp_free_i32(tcg_rd);
5092 tcg_temp_free_i64(tcg_rn);
5097 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
5098 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
5100 /* Half to single */
5101 TCGv_i32 tcg_rd = tcg_temp_new_i32();
5102 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
5103 write_fp_sreg(s, rd, tcg_rd);
5104 tcg_temp_free_i32(tcg_rd);
5106 /* Half to double */
5107 TCGv_i64 tcg_rd = tcg_temp_new_i64();
5108 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
5109 write_fp_dreg(s, rd, tcg_rd);
5110 tcg_temp_free_i64(tcg_rd);
5112 tcg_temp_free_i32(tcg_rn);
5120 /* Floating point data-processing (1 source)
5121 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
5122 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5123 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
5124 * +---+---+---+-----------+------+---+--------+-----------+------+------+
5126 static void disas_fp_1src(DisasContext *s, uint32_t insn)
5128 int type = extract32(insn, 22, 2);
5129 int opcode = extract32(insn, 15, 6);
5130 int rn = extract32(insn, 5, 5);
5131 int rd = extract32(insn, 0, 5);
5134 case 0x4: case 0x5: case 0x7:
5136 /* FCVT between half, single and double precision */
5137 int dtype = extract32(opcode, 0, 2);
5138 if (type == 2 || dtype == type) {
5139 unallocated_encoding(s);
5142 if (!fp_access_check(s)) {
5146 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
5152 /* 32-to-32 and 64-to-64 ops */
5155 if (!fp_access_check(s)) {
5159 handle_fp_1src_single(s, opcode, rd, rn);
5162 if (!fp_access_check(s)) {
5166 handle_fp_1src_double(s, opcode, rd, rn);
5169 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
5170 unallocated_encoding(s);
5174 if (!fp_access_check(s)) {
5178 handle_fp_1src_half(s, opcode, rd, rn);
5181 unallocated_encoding(s);
5185 unallocated_encoding(s);
5190 /* Floating-point data-processing (2 source) - single precision */
5191 static void handle_fp_2src_single(DisasContext *s, int opcode,
5192 int rd, int rn, int rm)
5199 tcg_res = tcg_temp_new_i32();
5200 fpst = get_fpstatus_ptr(false);
5201 tcg_op1 = read_fp_sreg(s, rn);
5202 tcg_op2 = read_fp_sreg(s, rm);
5205 case 0x0: /* FMUL */
5206 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5208 case 0x1: /* FDIV */
5209 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
5211 case 0x2: /* FADD */
5212 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
5214 case 0x3: /* FSUB */
5215 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
5217 case 0x4: /* FMAX */
5218 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
5220 case 0x5: /* FMIN */
5221 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
5223 case 0x6: /* FMAXNM */
5224 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
5226 case 0x7: /* FMINNM */
5227 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
5229 case 0x8: /* FNMUL */
5230 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5231 gen_helper_vfp_negs(tcg_res, tcg_res);
5235 write_fp_sreg(s, rd, tcg_res);
5237 tcg_temp_free_ptr(fpst);
5238 tcg_temp_free_i32(tcg_op1);
5239 tcg_temp_free_i32(tcg_op2);
5240 tcg_temp_free_i32(tcg_res);
5243 /* Floating-point data-processing (2 source) - double precision */
5244 static void handle_fp_2src_double(DisasContext *s, int opcode,
5245 int rd, int rn, int rm)
5252 tcg_res = tcg_temp_new_i64();
5253 fpst = get_fpstatus_ptr(false);
5254 tcg_op1 = read_fp_dreg(s, rn);
5255 tcg_op2 = read_fp_dreg(s, rm);
5258 case 0x0: /* FMUL */
5259 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5261 case 0x1: /* FDIV */
5262 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
5264 case 0x2: /* FADD */
5265 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
5267 case 0x3: /* FSUB */
5268 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
5270 case 0x4: /* FMAX */
5271 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
5273 case 0x5: /* FMIN */
5274 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
5276 case 0x6: /* FMAXNM */
5277 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5279 case 0x7: /* FMINNM */
5280 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5282 case 0x8: /* FNMUL */
5283 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5284 gen_helper_vfp_negd(tcg_res, tcg_res);
5288 write_fp_dreg(s, rd, tcg_res);
5290 tcg_temp_free_ptr(fpst);
5291 tcg_temp_free_i64(tcg_op1);
5292 tcg_temp_free_i64(tcg_op2);
5293 tcg_temp_free_i64(tcg_res);
5296 /* Floating point data-processing (2 source)
5297 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
5298 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5299 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
5300 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
5302 static void disas_fp_2src(DisasContext *s, uint32_t insn)
5304 int type = extract32(insn, 22, 2);
5305 int rd = extract32(insn, 0, 5);
5306 int rn = extract32(insn, 5, 5);
5307 int rm = extract32(insn, 16, 5);
5308 int opcode = extract32(insn, 12, 4);
5311 unallocated_encoding(s);
5317 if (!fp_access_check(s)) {
5320 handle_fp_2src_single(s, opcode, rd, rn, rm);
5323 if (!fp_access_check(s)) {
5326 handle_fp_2src_double(s, opcode, rd, rn, rm);
5329 unallocated_encoding(s);
5333 /* Floating-point data-processing (3 source) - single precision */
5334 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
5335 int rd, int rn, int rm, int ra)
5337 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
5338 TCGv_i32 tcg_res = tcg_temp_new_i32();
5339 TCGv_ptr fpst = get_fpstatus_ptr(false);
5341 tcg_op1 = read_fp_sreg(s, rn);
5342 tcg_op2 = read_fp_sreg(s, rm);
5343 tcg_op3 = read_fp_sreg(s, ra);
5345 /* These are fused multiply-add, and must be done as one
5346 * floating point operation with no rounding between the
5347 * multiplication and addition steps.
5348 * NB that doing the negations here as separate steps is
5349 * correct: an input NaN should come out with its sign bit
5350 * flipped if it is a negated input.
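 *
 * For reference, the (o1, o0) combinations select:
 *   (0, 0) FMADD:  d = a + n * m
 *   (0, 1) FMSUB:  d = a - n * m
 *   (1, 0) FNMADD: d = -a - n * m
 *   (1, 1) FNMSUB: d = -a + n * m
 */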
5353 gen_helper_vfp_negs(tcg_op3, tcg_op3);
5357 gen_helper_vfp_negs(tcg_op1, tcg_op1);
5360 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5362 write_fp_sreg(s, rd, tcg_res);
5364 tcg_temp_free_ptr(fpst);
5365 tcg_temp_free_i32(tcg_op1);
5366 tcg_temp_free_i32(tcg_op2);
5367 tcg_temp_free_i32(tcg_op3);
5368 tcg_temp_free_i32(tcg_res);
5371 /* Floating-point data-processing (3 source) - double precision */
5372 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
5373 int rd, int rn, int rm, int ra)
5375 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
5376 TCGv_i64 tcg_res = tcg_temp_new_i64();
5377 TCGv_ptr fpst = get_fpstatus_ptr(false);
5379 tcg_op1 = read_fp_dreg(s, rn);
5380 tcg_op2 = read_fp_dreg(s, rm);
5381 tcg_op3 = read_fp_dreg(s, ra);
5383 /* These are fused multiply-add, and must be done as one
5384 * floating point operation with no rounding between the
5385 * multiplication and addition steps.
5386 * NB that doing the negations here as separate steps is
5387 * correct: an input NaN should come out with its sign bit
5388 * flipped if it is a negated input.
5391 gen_helper_vfp_negd(tcg_op3, tcg_op3);
5395 gen_helper_vfp_negd(tcg_op1, tcg_op1);
5398 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
5400 write_fp_dreg(s, rd, tcg_res);
5402 tcg_temp_free_ptr(fpst);
5403 tcg_temp_free_i64(tcg_op1);
5404 tcg_temp_free_i64(tcg_op2);
5405 tcg_temp_free_i64(tcg_op3);
5406 tcg_temp_free_i64(tcg_res);
5409 /* Floating point data-processing (3 source)
5410 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
5411 * +---+---+---+-----------+------+----+------+----+------+------+------+
5412 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
5413 * +---+---+---+-----------+------+----+------+----+------+------+------+
5415 static void disas_fp_3src(DisasContext *s, uint32_t insn)
5417 int type = extract32(insn, 22, 2);
5418 int rd = extract32(insn, 0, 5);
5419 int rn = extract32(insn, 5, 5);
5420 int ra = extract32(insn, 10, 5);
5421 int rm = extract32(insn, 16, 5);
5422 bool o0 = extract32(insn, 15, 1);
5423 bool o1 = extract32(insn, 21, 1);
5427 if (!fp_access_check(s)) {
5430 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
5433 if (!fp_access_check(s)) {
5436 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
5439 unallocated_encoding(s);
5443 /* The imm8 encodes the sign bit, enough bits to represent an exponent in
5444 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
5445 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
5447 static uint64_t vfp_expand_imm(int size, uint8_t imm8)
5453 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5454 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
5455 extract32(imm8, 0, 6);
5459 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5460 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
5461 (extract32(imm8, 0, 6) << 3);
5465 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
5466 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
5467 (extract32(imm8, 0, 6) << 6);
5470 g_assert_not_reached();
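/* Illustrative example (not in the original source): imm8 == 0 has
 * sign 0, exponent pattern 0b10...0 and mantissa 0 at every size, so
 * it expands to 2.0: 0x4000 as a half, 0x40000000 as a single and
 * 0x4000000000000000 as a double.
 */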
5475 /* Floating point immediate
5476 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
5477 * +---+---+---+-----------+------+---+------------+-------+------+------+
5478 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
5479 * +---+---+---+-----------+------+---+------------+-------+------+------+
5481 static void disas_fp_imm(DisasContext *s, uint32_t insn)
5483 int rd = extract32(insn, 0, 5);
5484 int imm8 = extract32(insn, 13, 8);
5485 int is_double = extract32(insn, 22, 2);
5489 if (is_double > 1) {
5490 unallocated_encoding(s);
5494 if (!fp_access_check(s)) {
5498 imm = vfp_expand_imm(MO_32 + is_double, imm8);
5500 tcg_res = tcg_const_i64(imm);
5501 write_fp_dreg(s, rd, tcg_res);
5502 tcg_temp_free_i64(tcg_res);
5505 /* Handle floating point <=> fixed point conversions. Note that we can
5506 * also deal with fp <=> integer conversions as a special case (scale == 64).
5507 * OPTME: consider handling that special case specially or at least skipping
5508 * the call to scalbn in the helpers for zero shifts.
5510 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
5511 bool itof, int rmode, int scale, int sf, int type)
5513 bool is_signed = !(opcode & 1);
5514 bool is_double = type;
5515 TCGv_ptr tcg_fpstatus;
5518 tcg_fpstatus = get_fpstatus_ptr(false);
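/* Explanatory note (added, not in the original source): the scale
 * field encodes 64 - fracbits, so tcg_shift below is the number of
 * fractional bits; e.g. SCVTF Sd, Wn, #16 arrives here with
 * scale == 48 and is scaled by 2^-16 in the helper.
 */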
5520 tcg_shift = tcg_const_i32(64 - scale);
5523 TCGv_i64 tcg_int = cpu_reg(s, rn);
5525 TCGv_i64 tcg_extend = new_tmp_a64(s);
5528 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
5530 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
5533 tcg_int = tcg_extend;
5537 TCGv_i64 tcg_double = tcg_temp_new_i64();
5539 gen_helper_vfp_sqtod(tcg_double, tcg_int,
5540 tcg_shift, tcg_fpstatus);
5542 gen_helper_vfp_uqtod(tcg_double, tcg_int,
5543 tcg_shift, tcg_fpstatus);
5545 write_fp_dreg(s, rd, tcg_double);
5546 tcg_temp_free_i64(tcg_double);
5548 TCGv_i32 tcg_single = tcg_temp_new_i32();
5550 gen_helper_vfp_sqtos(tcg_single, tcg_int,
5551 tcg_shift, tcg_fpstatus);
5553 gen_helper_vfp_uqtos(tcg_single, tcg_int,
5554 tcg_shift, tcg_fpstatus);
5556 write_fp_sreg(s, rd, tcg_single);
5557 tcg_temp_free_i32(tcg_single);
5560 TCGv_i64 tcg_int = cpu_reg(s, rd);
5563 if (extract32(opcode, 2, 1)) {
5564 /* There are too many rounding modes to all fit into rmode,
5565 * so FCVTA[US] is a special case.
5567 rmode = FPROUNDING_TIEAWAY;
5570 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
5572 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5575 TCGv_i64 tcg_double = read_fp_dreg(s, rn);
5578 gen_helper_vfp_tosld(tcg_int, tcg_double,
5579 tcg_shift, tcg_fpstatus);
5581 gen_helper_vfp_tosqd(tcg_int, tcg_double,
5582 tcg_shift, tcg_fpstatus);
5586 gen_helper_vfp_tould(tcg_int, tcg_double,
5587 tcg_shift, tcg_fpstatus);
5589 gen_helper_vfp_touqd(tcg_int, tcg_double,
5590 tcg_shift, tcg_fpstatus);
5593 tcg_temp_free_i64(tcg_double);
5595 TCGv_i32 tcg_single = read_fp_sreg(s, rn);
5598 gen_helper_vfp_tosqs(tcg_int, tcg_single,
5599 tcg_shift, tcg_fpstatus);
5601 gen_helper_vfp_touqs(tcg_int, tcg_single,
5602 tcg_shift, tcg_fpstatus);
5605 TCGv_i32 tcg_dest = tcg_temp_new_i32();
5607 gen_helper_vfp_tosls(tcg_dest, tcg_single,
5608 tcg_shift, tcg_fpstatus);
5610 gen_helper_vfp_touls(tcg_dest, tcg_single,
5611 tcg_shift, tcg_fpstatus);
5613 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
5614 tcg_temp_free_i32(tcg_dest);
5616 tcg_temp_free_i32(tcg_single);
5619 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
5620 tcg_temp_free_i32(tcg_rmode);
5623 tcg_gen_ext32u_i64(tcg_int, tcg_int);
5627 tcg_temp_free_ptr(tcg_fpstatus);
5628 tcg_temp_free_i32(tcg_shift);
5631 /* Floating point <-> fixed point conversions
5632 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5633 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5634 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
5635 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
5637 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
5639 int rd = extract32(insn, 0, 5);
5640 int rn = extract32(insn, 5, 5);
5641 int scale = extract32(insn, 10, 6);
5642 int opcode = extract32(insn, 16, 3);
5643 int rmode = extract32(insn, 19, 2);
5644 int type = extract32(insn, 22, 2);
5645 bool sbit = extract32(insn, 29, 1);
5646 bool sf = extract32(insn, 31, 1);
5649 if (sbit || (type > 1)
5650 || (!sf && scale < 32)) {
5651 unallocated_encoding(s);
5655 switch ((rmode << 3) | opcode) {
5656 case 0x2: /* SCVTF */
5657 case 0x3: /* UCVTF */
5660 case 0x18: /* FCVTZS */
5661 case 0x19: /* FCVTZU */
5665 unallocated_encoding(s);
5669 if (!fp_access_check(s)) {
5673 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
5676 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
5678 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
5679 * without conversion.
5683 TCGv_i64 tcg_rn = cpu_reg(s, rn);
5689 tmp = tcg_temp_new_i64();
5690 tcg_gen_ext32u_i64(tmp, tcg_rn);
5691 write_fp_dreg(s, rd, tmp);
5692 tcg_temp_free_i64(tmp);
5696 write_fp_dreg(s, rd, tcg_rn);
5699 /* 64 bit to top half. */
5700 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
5701 clear_vec_high(s, true, rd);
5705 TCGv_i64 tcg_rd = cpu_reg(s, rd);
5710 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
5714 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
5717 /* 64 bits from top half */
5718 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
5724 /* Floating point <-> integer conversions
5725 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
5726 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5727 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
5728 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
5730 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
5732 int rd = extract32(insn, 0, 5);
5733 int rn = extract32(insn, 5, 5);
5734 int opcode = extract32(insn, 16, 3);
5735 int rmode = extract32(insn, 19, 2);
5736 int type = extract32(insn, 22, 2);
5737 bool sbit = extract32(insn, 29, 1);
5738 bool sf = extract32(insn, 31, 1);
5741 unallocated_encoding(s);
5747 bool itof = opcode & 1;
5750 unallocated_encoding(s);
5754 switch (sf << 3 | type << 1 | rmode) {
5755 case 0x0: /* 32 bit */
5756 case 0xa: /* 64 bit */
5757 case 0xd: /* 64 bit to top half of quad */
5760 /* all other sf/type/rmode combinations are invalid */
5761 unallocated_encoding(s);
5765 if (!fp_access_check(s)) {
5768 handle_fmov(s, rd, rn, type, itof);
5770 /* actual FP conversions */
5771 bool itof = extract32(opcode, 1, 1);
5773 if (type > 1 || (rmode != 0 && opcode > 1)) {
5774 unallocated_encoding(s);
5778 if (!fp_access_check(s)) {
5781 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
5785 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
5786 * 31 30 29 28 25 24 0
5787 * +---+---+---+---------+-----------------------------+
5788 * | | 0 | | 1 1 1 1 | |
5789 * +---+---+---+---------+-----------------------------+
5791 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
5793 if (extract32(insn, 24, 1)) {
5794 /* Floating point data-processing (3 source) */
5795 disas_fp_3src(s, insn);
5796 } else if (extract32(insn, 21, 1) == 0) {
5797 /* Floating point to fixed point conversions */
5798 disas_fp_fixed_conv(s, insn);
5800 switch (extract32(insn, 10, 2)) {
5802 /* Floating point conditional compare */
5803 disas_fp_ccomp(s, insn);
5806 /* Floating point data-processing (2 source) */
5807 disas_fp_2src(s, insn);
5810 /* Floating point conditional select */
5811 disas_fp_csel(s, insn);
5814 switch (ctz32(extract32(insn, 12, 4))) {
5815 case 0: /* [15:12] == xxx1 */
5816 /* Floating point immediate */
5817 disas_fp_imm(s, insn);
5819 case 1: /* [15:12] == xx10 */
5820 /* Floating point compare */
5821 disas_fp_compare(s, insn);
5823 case 2: /* [15:12] == x100 */
5824 /* Floating point data-processing (1 source) */
5825 disas_fp_1src(s, insn);
5827 case 3: /* [15:12] == 1000 */
5828 unallocated_encoding(s);
5830 default: /* [15:12] == 0000 */
5831 /* Floating point <-> integer conversions */
5832 disas_fp_int_conv(s, insn);
5840 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
5843 /* Extract 64 bits from the middle of two concatenated 64 bit
5844 * vector register slices left:right. The extracted bits start
5845 * at 'pos' bits into the right (least significant) side.
5846 * We return the result in tcg_right, and guarantee not to clobber tcg_left.
5849 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5850 assert(pos > 0 && pos < 64);
5852 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
5853 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
5854 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
5856 tcg_temp_free_i64(tcg_tmp);
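/* Illustrative example (not in the original source): with pos == 24,
 * tcg_right ends up as (right >> 24) | (left << 40), i.e. bits
 * [87:24] of the 128-bit concatenation left:right.
 */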
5860 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
5861 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5862 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
5863 * +---+---+-------------+-----+---+------+---+------+---+------+------+
5865 static void disas_simd_ext(DisasContext *s, uint32_t insn)
5867 int is_q = extract32(insn, 30, 1);
5868 int op2 = extract32(insn, 22, 2);
5869 int imm4 = extract32(insn, 11, 4);
5870 int rm = extract32(insn, 16, 5);
5871 int rn = extract32(insn, 5, 5);
5872 int rd = extract32(insn, 0, 5);
5873 int pos = imm4 << 3;
5874 TCGv_i64 tcg_resl, tcg_resh;
5876 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
5877 unallocated_encoding(s);
5881 if (!fp_access_check(s)) {
5885 tcg_resh = tcg_temp_new_i64();
5886 tcg_resl = tcg_temp_new_i64();
5888 /* Vd gets bits starting at pos bits into Vm:Vn. This is
5889 * either extracting 128 bits from a 128:128 concatenation, or
5890 * extracting 64 bits from a 64:64 concatenation.
5893 read_vec_element(s, tcg_resl, rn, 0, MO_64);
5895 read_vec_element(s, tcg_resh, rm, 0, MO_64);
5896 do_ext64(s, tcg_resh, tcg_resl, pos);
5898 tcg_gen_movi_i64(tcg_resh, 0);
5905 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
5906 EltPosns *elt = eltposns;
5913 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
5915 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
5918 do_ext64(s, tcg_resh, tcg_resl, pos);
5919 tcg_hh = tcg_temp_new_i64();
5920 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
5921 do_ext64(s, tcg_hh, tcg_resh, pos);
5922 tcg_temp_free_i64(tcg_hh);
5926 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5927 tcg_temp_free_i64(tcg_resl);
5928 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5929 tcg_temp_free_i64(tcg_resh);
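/* Illustrative example (not in the original source): for
 * EXT Vd.16B, Vn.16B, Vm.16B, #5 the result is bytes 20:5 of the
 * concatenation Vm:Vn, with Vn supplying the least significant bytes.
 */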
5933 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
5934 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5935 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
5936 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
5938 static void disas_simd_tb(DisasContext *s, uint32_t insn)
5940 int op2 = extract32(insn, 22, 2);
5941 int is_q = extract32(insn, 30, 1);
5942 int rm = extract32(insn, 16, 5);
5943 int rn = extract32(insn, 5, 5);
5944 int rd = extract32(insn, 0, 5);
5945 int is_tblx = extract32(insn, 12, 1);
5946 int len = extract32(insn, 13, 2);
5947 TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
5948 TCGv_i32 tcg_regno, tcg_numregs;
5951 unallocated_encoding(s);
5955 if (!fp_access_check(s)) {
5959 /* This does a table lookup: for every byte element in the input
5960 * we index into a table formed from up to four vector registers,
5961 * and then the output is the result of the lookups. Our helper
5962 * function does the lookup operation for a single 64 bit part of the input.
5965 tcg_resl = tcg_temp_new_i64();
5966 tcg_resh = tcg_temp_new_i64();
5969 read_vec_element(s, tcg_resl, rd, 0, MO_64);
5971 tcg_gen_movi_i64(tcg_resl, 0);
5973 if (is_tblx && is_q) {
5974 read_vec_element(s, tcg_resh, rd, 1, MO_64);
5976 tcg_gen_movi_i64(tcg_resh, 0);
5979 tcg_idx = tcg_temp_new_i64();
5980 tcg_regno = tcg_const_i32(rn);
5981 tcg_numregs = tcg_const_i32(len + 1);
5982 read_vec_element(s, tcg_idx, rm, 0, MO_64);
5983 gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
5984 tcg_regno, tcg_numregs);
5986 read_vec_element(s, tcg_idx, rm, 1, MO_64);
5987 gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
5988 tcg_regno, tcg_numregs);
5990 tcg_temp_free_i64(tcg_idx);
5991 tcg_temp_free_i32(tcg_regno);
5992 tcg_temp_free_i32(tcg_numregs);
5994 write_vec_element(s, tcg_resl, rd, 0, MO_64);
5995 tcg_temp_free_i64(tcg_resl);
5996 write_vec_element(s, tcg_resh, rd, 1, MO_64);
5997 tcg_temp_free_i64(tcg_resh);
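/* For reference (comment added, not in the original source): each
 * result byte is table[index], where the table is the len+1 registers
 * starting at Vn; an out-of-range index yields 0 for TBL and leaves
 * the destination byte unchanged for TBX.
 */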
6001 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
6002 * +---+---+-------------+------+---+------+---+------------------+------+
6003 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
6004 * +---+---+-------------+------+---+------+---+------------------+------+
6006 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
6008 int rd = extract32(insn, 0, 5);
6009 int rn = extract32(insn, 5, 5);
6010 int rm = extract32(insn, 16, 5);
6011 int size = extract32(insn, 22, 2);
6012 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
6013 * bit 2 indicates 1 vs 2 variant of the insn.
6015 int opcode = extract32(insn, 12, 2);
6016 bool part = extract32(insn, 14, 1);
6017 bool is_q = extract32(insn, 30, 1);
6018 int esize = 8 << size;
6020 int datasize = is_q ? 128 : 64;
6021 int elements = datasize / esize;
6022 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
6024 if (opcode == 0 || (size == 3 && !is_q)) {
6025 unallocated_encoding(s);
6029 if (!fp_access_check(s)) {
6033 tcg_resl = tcg_const_i64(0);
6034 tcg_resh = tcg_const_i64(0);
6035 tcg_res = tcg_temp_new_i64();
6037 for (i = 0; i < elements; i++) {
6039 case 1: /* UZP1/2 */
6041 int midpoint = elements / 2;
6043 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
6045 read_vec_element(s, tcg_res, rm,
6046 2 * (i - midpoint) + part, size);
6050 case 2: /* TRN1/2 */
6052 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
6054 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
6057 case 3: /* ZIP1/2 */
6059 int base = part * elements / 2;
6061 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
6063 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
6068 g_assert_not_reached();
6073 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
6074 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
6076 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
6077 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
6081 tcg_temp_free_i64(tcg_res);
6083 write_vec_element(s, tcg_resl, rd, 0, MO_64);
6084 tcg_temp_free_i64(tcg_resl);
6085 write_vec_element(s, tcg_resh, rd, 1, MO_64);
6086 tcg_temp_free_i64(tcg_resh);
6090 * do_reduction_op helper
6092 * This mirrors the Reduce() pseudocode in the ARM ARM. It is
6093 * important for correct NaN propagation that we do these
6094 * operations in exactly the order specified by the pseudocode.
6096 * This is a recursive function; TCG temps should be freed by the
6097 * calling function once it is done with the values.
6099 static TCGv_i32 do_reduction_op(DisasContext *s, int fpopcode, int rn,
6100 int esize, int size, int vmap, TCGv_ptr fpst)
6102 if (esize == size) {
6104 TCGMemOp msize = esize == 16 ? MO_16 : MO_32;
6107 /* We should have one register left here */
6108 assert(ctpop8(vmap) == 1);
6109 element = ctz32(vmap);
6110 assert(element < 8);
6112 tcg_elem = tcg_temp_new_i32();
6113 read_vec_element_i32(s, tcg_elem, rn, element, msize);
6116 int bits = size / 2;
6117 int shift = ctpop8(vmap) / 2;
6118 int vmap_lo = (vmap >> shift) & vmap;
6119 int vmap_hi = (vmap & ~vmap_lo);
6120 TCGv_i32 tcg_hi, tcg_lo, tcg_res;
6122 tcg_hi = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_hi, fpst);
6123 tcg_lo = do_reduction_op(s, fpopcode, rn, esize, bits, vmap_lo, fpst);
6124 tcg_res = tcg_temp_new_i32();
6127 case 0x0c: /* fmaxnmv half-precision */
6128 gen_helper_advsimd_maxnumh(tcg_res, tcg_lo, tcg_hi, fpst);
6130 case 0x0f: /* fmaxv half-precision */
6131 gen_helper_advsimd_maxh(tcg_res, tcg_lo, tcg_hi, fpst);
6133 case 0x1c: /* fminnmv half-precision */
6134 gen_helper_advsimd_minnumh(tcg_res, tcg_lo, tcg_hi, fpst);
6136 case 0x1f: /* fminv half-precision */
6137 gen_helper_advsimd_minh(tcg_res, tcg_lo, tcg_hi, fpst);
6139 case 0x2c: /* fmaxnmv */
6140 gen_helper_vfp_maxnums(tcg_res, tcg_lo, tcg_hi, fpst);
6142 case 0x2f: /* fmaxv */
6143 gen_helper_vfp_maxs(tcg_res, tcg_lo, tcg_hi, fpst);
6145 case 0x3c: /* fminnmv */
6146 gen_helper_vfp_minnums(tcg_res, tcg_lo, tcg_hi, fpst);
6148 case 0x3f: /* fminv */
6149 gen_helper_vfp_mins(tcg_res, tcg_lo, tcg_hi, fpst);
6152 g_assert_not_reached();
6155 tcg_temp_free_i32(tcg_hi);
6156 tcg_temp_free_i32(tcg_lo);
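/* Illustrative example (not in the original source): for four lanes
 * vmap == 0b1111 splits into vmap_lo == 0b0011 and vmap_hi == 0b1100,
 * so the result is op(op(e0, e1), op(e2, e3)), matching the pairwise
 * tree of the Reduce() pseudocode.
 */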
6161 /* AdvSIMD across lanes
6162 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6163 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6164 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
6165 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6167 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
6169 int rd = extract32(insn, 0, 5);
6170 int rn = extract32(insn, 5, 5);
6171 int size = extract32(insn, 22, 2);
6172 int opcode = extract32(insn, 12, 5);
6173 bool is_q = extract32(insn, 30, 1);
6174 bool is_u = extract32(insn, 29, 1);
6176 bool is_min = false;
6180 TCGv_i64 tcg_res, tcg_elt;
6183 case 0x1b: /* ADDV */
6185 unallocated_encoding(s);
6189 case 0x3: /* SADDLV, UADDLV */
6190 case 0xa: /* SMAXV, UMAXV */
6191 case 0x1a: /* SMINV, UMINV */
6192 if (size == 3 || (size == 2 && !is_q)) {
6193 unallocated_encoding(s);
6197 case 0xc: /* FMAXNMV, FMINNMV */
6198 case 0xf: /* FMAXV, FMINV */
6199 /* Bit 1 of size field encodes min vs max and the actual size
6200 * depends on the encoding of the U bit. If not set (and FP16
6201 * enabled) then we do half-precision float instead of single precision.
6204 is_min = extract32(size, 1, 1);
6206 if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6208 } else if (!is_u || !is_q || extract32(size, 0, 1)) {
6209 unallocated_encoding(s);
6216 unallocated_encoding(s);
6220 if (!fp_access_check(s)) {
6225 elements = (is_q ? 128 : 64) / esize;
6227 tcg_res = tcg_temp_new_i64();
6228 tcg_elt = tcg_temp_new_i64();
6230 /* These instructions operate across all lanes of a vector
6231 * to produce a single result. We can guarantee that a 64
6232 * bit intermediate is sufficient:
6233 * + for [US]ADDLV the maximum element size is 32 bits, and
6234 * the result type is 64 bits
6235 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
6236 * same as the element size, which is 32 bits at most
6237 * For the integer operations we can choose to work at 64
6238 * or 32 bits and truncate at the end; for simplicity
6239 * we use 64 bits always. The floating point
6240 * ops do require 32 bit intermediates, though.
6243 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
6245 for (i = 1; i < elements; i++) {
6246 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
6249 case 0x03: /* SADDLV / UADDLV */
6250 case 0x1b: /* ADDV */
6251 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
6253 case 0x0a: /* SMAXV / UMAXV */
6255 tcg_gen_umax_i64(tcg_res, tcg_res, tcg_elt);
6257 tcg_gen_smax_i64(tcg_res, tcg_res, tcg_elt);
6260 case 0x1a: /* SMINV / UMINV */
6262 tcg_gen_umin_i64(tcg_res, tcg_res, tcg_elt);
6264 tcg_gen_smin_i64(tcg_res, tcg_res, tcg_elt);
6268 g_assert_not_reached();
6273 /* Floating point vector reduction ops which work across 32
6274 * bit (single) or 16 bit (half-precision) intermediates.
6275 * Note that correct NaN propagation requires that we do these
6276 * operations in exactly the order specified by the pseudocode.
6278 TCGv_ptr fpst = get_fpstatus_ptr(size == MO_16);
6279 int fpopcode = opcode | is_min << 4 | is_u << 5;
6280 int vmap = (1 << elements) - 1;
6281 TCGv_i32 tcg_res32 = do_reduction_op(s, fpopcode, rn, esize,
6282 (is_q ? 128 : 64), vmap, fpst);
6283 tcg_gen_extu_i32_i64(tcg_res, tcg_res32);
6284 tcg_temp_free_i32(tcg_res32);
6285 tcg_temp_free_ptr(fpst);
6288 tcg_temp_free_i64(tcg_elt);
6290 /* Now truncate the result to the width required for the final output */
6291 if (opcode == 0x03) {
6292 /* SADDLV, UADDLV: result is 2*esize */
6298 tcg_gen_ext8u_i64(tcg_res, tcg_res);
6301 tcg_gen_ext16u_i64(tcg_res, tcg_res);
6304 tcg_gen_ext32u_i64(tcg_res, tcg_res);
6309 g_assert_not_reached();
6312 write_fp_dreg(s, rd, tcg_res);
6313 tcg_temp_free_i64(tcg_res);
6316 /* DUP (Element, Vector)
6318 * 31 30 29 21 20 16 15 10 9 5 4 0
6319 * +---+---+-------------------+--------+-------------+------+------+
6320 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
6321 * +---+---+-------------------+--------+-------------+------+------+
6323 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6325 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
6328 int size = ctz32(imm5);
6329 int index = imm5 >> (size + 1);
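/* E.g. imm5 == 0b10010: size == 1 (16-bit elements) and
 * index == 0b100 == 4, i.e. DUP Vd.8H, Vn.H[4] for the is_q case.
 */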
6331 if (size > 3 || (size == 3 && !is_q)) {
6332 unallocated_encoding(s);
6336 if (!fp_access_check(s)) {
6340 tcg_gen_gvec_dup_mem(size, vec_full_reg_offset(s, rd),
6341 vec_reg_offset(s, rn, index, size),
6342 is_q ? 16 : 8, vec_full_reg_size(s));
6345 /* DUP (element, scalar)
6346 * 31 21 20 16 15 10 9 5 4 0
6347 * +-----------------------+--------+-------------+------+------+
6348 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
6349 * +-----------------------+--------+-------------+------+------+
6351 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
6354 int size = ctz32(imm5);
6359 unallocated_encoding(s);
6363 if (!fp_access_check(s)) {
6367 index = imm5 >> (size + 1);
6369 /* This instruction just extracts the specified element and
6370 * zero-extends it into the bottom of the destination register.
6372 tmp = tcg_temp_new_i64();
6373 read_vec_element(s, tmp, rn, index, size);
6374 write_fp_dreg(s, rd, tmp);
6375 tcg_temp_free_i64(tmp);
6380 * 31 30 29 21 20 16 15 10 9 5 4 0
6381 * +---+---+-------------------+--------+-------------+------+------+
6382 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
6383 * +---+---+-------------------+--------+-------------+------+------+
6385 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6387 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
6390 int size = ctz32(imm5);
6391 uint32_t dofs, oprsz, maxsz;
6393 if (size > 3 || ((size == 3) && !is_q)) {
6394 unallocated_encoding(s);
6398 if (!fp_access_check(s)) {
6402 dofs = vec_full_reg_offset(s, rd);
6403 oprsz = is_q ? 16 : 8;
6404 maxsz = vec_full_reg_size(s);
6406 tcg_gen_gvec_dup_i64(size, dofs, oprsz, maxsz, cpu_reg(s, rn));
6411 * 31 21 20 16 15 14 11 10 9 5 4 0
6412 * +-----------------------+--------+------------+---+------+------+
6413 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6414 * +-----------------------+--------+------------+---+------+------+
6416 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6417 * index: encoded in imm5<4:size+1>
6419 static void handle_simd_inse(DisasContext *s, int rd, int rn,
6422 int size = ctz32(imm5);
6423 int src_index, dst_index;
6427 unallocated_encoding(s);
6431 if (!fp_access_check(s)) {
6435 dst_index = extract32(imm5, 1+size, 5);
6436 src_index = extract32(imm4, size, 4);
6438 tmp = tcg_temp_new_i64();
6440 read_vec_element(s, tmp, rn, src_index, size);
6441 write_vec_element(s, tmp, rd, dst_index, size);
6443 tcg_temp_free_i64(tmp);
6449 * 31 21 20 16 15 10 9 5 4 0
6450 * +-----------------------+--------+-------------+------+------+
6451 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
6452 * +-----------------------+--------+-------------+------+------+
6454 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6455 * index: encoded in imm5<4:size+1>
6457 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
6459 int size = ctz32(imm5);
6463 unallocated_encoding(s);
6467 if (!fp_access_check(s)) {
6471 idx = extract32(imm5, 1 + size, 4 - size);
6472 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
6479 * 31 30 29 21 20 16 15 12 10 9 5 4 0
6480 * +---+---+-------------------+--------+-------------+------+------+
6481 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
6482 * +---+---+-------------------+--------+-------------+------+------+
6484 * U: unsigned when set
6485 * size: encoded in imm5 (see ARM ARM LowestSetBit())
6487 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
6488 int rn, int rd, int imm5)
6490 int size = ctz32(imm5);
6494 /* Check for UnallocatedEncodings */
6496 if (size > 2 || (size == 2 && !is_q)) {
6497 unallocated_encoding(s);
6502 || (size < 3 && is_q)
6503 || (size == 3 && !is_q)) {
6504 unallocated_encoding(s);
6509 if (!fp_access_check(s)) {
6513 element = extract32(imm5, 1+size, 4);
6515 tcg_rd = cpu_reg(s, rd);
6516 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
6517 if (is_signed && !is_q) {
6518 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
6523 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6524 * +---+---+----+-----------------+------+---+------+---+------+------+
6525 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6526 * +---+---+----+-----------------+------+---+------+---+------+------+
6528 static void disas_simd_copy(DisasContext *s, uint32_t insn)
6530 int rd = extract32(insn, 0, 5);
6531 int rn = extract32(insn, 5, 5);
6532 int imm4 = extract32(insn, 11, 4);
6533 int op = extract32(insn, 29, 1);
6534 int is_q = extract32(insn, 30, 1);
6535 int imm5 = extract32(insn, 16, 5);
6540 handle_simd_inse(s, rd, rn, imm4, imm5);
6542 unallocated_encoding(s);
6547 /* DUP (element - vector) */
6548 handle_simd_dupe(s, is_q, rd, rn, imm5);
6552 handle_simd_dupg(s, is_q, rd, rn, imm5);
6557 handle_simd_insg(s, rd, rn, imm5);
6559 unallocated_encoding(s);
6564 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
6565 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
6568 unallocated_encoding(s);
6574 /* AdvSIMD modified immediate
6575 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
6576 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6577 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
6578 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
6580 * There are a number of operations that can be carried out here:
6581 * MOVI - move (shifted) imm into register
6582 * MVNI - move inverted (shifted) imm into register
6583 * ORR - bitwise OR of (shifted) imm with register
6584 * BIC - bitwise clear of (shifted) imm with register
6585 * With ARMv8.2 we also have:
6586 * FMOV half-precision
6588 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
6590 int rd = extract32(insn, 0, 5);
6591 int cmode = extract32(insn, 12, 4);
6592 int cmode_3_1 = extract32(cmode, 1, 3);
6593 int cmode_0 = extract32(cmode, 0, 1);
6594 int o2 = extract32(insn, 11, 1);
6595 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
6596 bool is_neg = extract32(insn, 29, 1);
6597 bool is_q = extract32(insn, 30, 1);
6600 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
6601 /* Check for FMOV (vector, immediate) - half-precision */
6602 if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
6603 unallocated_encoding(s);
6608 if (!fp_access_check(s)) {
6612 /* See AdvSIMDExpandImm() in ARM ARM */
6613 switch (cmode_3_1) {
6614 case 0: /* Replicate(Zeros(24):imm8, 2) */
6615 case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
6616 case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
6617 case 3: /* Replicate(imm8:Zeros(24), 2) */
6619 int shift = cmode_3_1 * 8;
6620 imm = bitfield_replicate(abcdefgh << shift, 32);
6623 case 4: /* Replicate(Zeros(8):imm8, 4) */
6624 case 5: /* Replicate(imm8:Zeros(8), 4) */
6626 int shift = (cmode_3_1 & 0x1) * 8;
6627 imm = bitfield_replicate(abcdefgh << shift, 16);
6632 /* Replicate(Zeros(8):imm8:Ones(16), 2) */
6633 imm = (abcdefgh << 16) | 0xffff;
6635 /* Replicate(Zeros(16):imm8:Ones(8), 2) */
6636 imm = (abcdefgh << 8) | 0xff;
6638 imm = bitfield_replicate(imm, 32);
6641 if (!cmode_0 && !is_neg) {
6642 imm = bitfield_replicate(abcdefgh, 8);
6643 } else if (!cmode_0 && is_neg) {
6646 for (i = 0; i < 8; i++) {
6647 if ((abcdefgh) & (1 << i)) {
6648 imm |= 0xffULL << (i * 8);
6651 } else if (cmode_0) {
6653 imm = (abcdefgh & 0x3f) << 48;
6654 if (abcdefgh & 0x80) {
6655 imm |= 0x8000000000000000ULL;
6657 if (abcdefgh & 0x40) {
6658 imm |= 0x3fc0000000000000ULL;
6660 imm |= 0x4000000000000000ULL;
6664 /* FMOV (vector, immediate) - half-precision */
6665 imm = vfp_expand_imm(MO_16, abcdefgh);
6666 /* now duplicate across the lanes */
6667 imm = bitfield_replicate(imm, 16);
6669 imm = (abcdefgh & 0x3f) << 19;
6670 if (abcdefgh & 0x80) {
6673 if (abcdefgh & 0x40) {
6684 fprintf(stderr, "%s: cmode_3_1: %x\n", __func__, cmode_3_1);
6685 g_assert_not_reached();
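/* Illustrative example (not in the original source): cmode_3_1 == 0
 * with abcdefgh == 0xab gives imm == 0x000000ab000000ab, i.e. imm8
 * replicated into both 32-bit lanes with no shift.
 */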
6688 if (cmode_3_1 != 7 && is_neg) {
6692 if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
6693 /* MOVI or MVNI, with MVNI negation handled above. */
6694 tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
6695 vec_full_reg_size(s), imm);
6697 /* ORR or BIC, with BIC negation to AND handled above. */
6699 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_andi, MO_64);
6701 gen_gvec_fn2i(s, is_q, rd, rd, imm, tcg_gen_gvec_ori, MO_64);
6706 /* AdvSIMD scalar copy
6707 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
6708 * +-----+----+-----------------+------+---+------+---+------+------+
6709 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
6710 * +-----+----+-----------------+------+---+------+---+------+------+
6712 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
6714 int rd = extract32(insn, 0, 5);
6715 int rn = extract32(insn, 5, 5);
6716 int imm4 = extract32(insn, 11, 4);
6717 int imm5 = extract32(insn, 16, 5);
6718 int op = extract32(insn, 29, 1);
6720 if (op != 0 || imm4 != 0) {
6721 unallocated_encoding(s);
6725 /* DUP (element, scalar) */
6726 handle_simd_dupes(s, rd, rn, imm5);
6729 /* AdvSIMD scalar pairwise
6730 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6731 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6732 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
6733 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6735 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
6737 int u = extract32(insn, 29, 1);
6738 int size = extract32(insn, 22, 2);
6739 int opcode = extract32(insn, 12, 5);
6740 int rn = extract32(insn, 5, 5);
6741 int rd = extract32(insn, 0, 5);
6744 /* For some ops (the FP ones), size[1] is part of the encoding.
6745 * For ADDP strictly it is not but size[1] is always 1 for valid encodings.
6748 opcode |= (extract32(size, 1, 1) << 5);
6751 case 0x3b: /* ADDP */
6752 if (u || size != 3) {
6753 unallocated_encoding(s);
6756 if (!fp_access_check(s)) {
6762 case 0xc: /* FMAXNMP */
6763 case 0xd: /* FADDP */
6764 case 0xf: /* FMAXP */
6765 case 0x2c: /* FMINNMP */
6766 case 0x2f: /* FMINP */
6767 /* FP op, size[0] is 32 or 64 bit */
6769 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
6770 unallocated_encoding(s);
6776 size = extract32(size, 0, 1) ? MO_64 : MO_32;
6779 if (!fp_access_check(s)) {
6783 fpst = get_fpstatus_ptr(size == MO_16);
6786 unallocated_encoding(s);
6790 if (size == MO_64) {
6791 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6792 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6793 TCGv_i64 tcg_res = tcg_temp_new_i64();
6795 read_vec_element(s, tcg_op1, rn, 0, MO_64);
6796 read_vec_element(s, tcg_op2, rn, 1, MO_64);
6799 case 0x3b: /* ADDP */
6800 tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
6802 case 0xc: /* FMAXNMP */
6803 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6805 case 0xd: /* FADDP */
6806 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
6808 case 0xf: /* FMAXP */
6809 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
6811 case 0x2c: /* FMINNMP */
6812 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
6814 case 0x2f: /* FMINP */
6815 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
6818 g_assert_not_reached();
6821 write_fp_dreg(s, rd, tcg_res);
6823 tcg_temp_free_i64(tcg_op1);
6824 tcg_temp_free_i64(tcg_op2);
6825 tcg_temp_free_i64(tcg_res);
6827 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6828 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6829 TCGv_i32 tcg_res = tcg_temp_new_i32();
6831 read_vec_element_i32(s, tcg_op1, rn, 0, size);
6832 read_vec_element_i32(s, tcg_op2, rn, 1, size);
6834 if (size == MO_16) {
6836 case 0xc: /* FMAXNMP */
6837 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6839 case 0xd: /* FADDP */
6840 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
6842 case 0xf: /* FMAXP */
6843 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
6845 case 0x2c: /* FMINNMP */
6846 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
6848 case 0x2f: /* FMINP */
6849 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
6852 g_assert_not_reached();
6856 case 0xc: /* FMAXNMP */
6857 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
6859 case 0xd: /* FADDP */
6860 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
6862 case 0xf: /* FMAXP */
6863 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
6865 case 0x2c: /* FMINNMP */
6866 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
6868 case 0x2f: /* FMINP */
6869 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
6872 g_assert_not_reached();
6876 write_fp_sreg(s, rd, tcg_res);
6878 tcg_temp_free_i32(tcg_op1);
6879 tcg_temp_free_i32(tcg_op2);
6880 tcg_temp_free_i32(tcg_res);
6884 tcg_temp_free_ptr(fpst);
6889 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
6891 * This handles the common shift logic and is used by both
6892 * the vector and scalar code.
6894 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
6895 TCGv_i64 tcg_rnd, bool accumulate,
6896 bool is_u, int size, int shift)
6898 bool extended_result = false;
6899 bool round = tcg_rnd != NULL;
6901 TCGv_i64 tcg_src_hi;
6903 if (round && size == 3) {
6904 extended_result = true;
6905 ext_lshift = 64 - shift;
6906 tcg_src_hi = tcg_temp_new_i64();
6907 } else if (shift == 64) {
6908 if (!accumulate && is_u) {
6909 /* result is zero */
6910 tcg_gen_movi_i64(tcg_res, 0);
6915 /* Deal with the rounding step */
6917 if (extended_result) {
6918 TCGv_i64 tcg_zero = tcg_const_i64(0);
6920 /* take care of sign-extending tcg_src into tcg_src_hi */
6921 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
6922 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6923 tcg_src, tcg_src_hi,
6926 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
6930 tcg_temp_free_i64(tcg_zero);
6932 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
6936 /* Now do the shift right */
6937 if (round && extended_result) {
6938 /* extended case, >64 bit precision required */
6939 if (ext_lshift == 0) {
6940 /* special case, only high bits matter */
6941 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
6943 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6944 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
6945 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
6950 /* essentially shifting in 64 zeros */
6951 tcg_gen_movi_i64(tcg_src, 0);
6953 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
6957 /* effectively extending the sign-bit */
6958 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
6960 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
6966 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
6968 tcg_gen_mov_i64(tcg_res, tcg_src);
6971 if (extended_result) {
6972 tcg_temp_free_i64(tcg_src_hi);
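/* Illustrative example (not in the original source): URSHR with
 * shift == 2 applied to 7 adds round_const == 1 << (shift - 1) == 2
 * and then shifts: (7 + 2) >> 2 == 2, i.e. 7/4 rounded to nearest.
 */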
6976 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
6977 static void handle_scalar_simd_shri(DisasContext *s,
6978 bool is_u, int immh, int immb,
6979 int opcode, int rn, int rd)
6982 int immhb = immh << 3 | immb;
6983 int shift = 2 * (8 << size) - immhb;
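/* E.g. immh:immb == 1000:111 gives immhb == 71 and shift == 57; the
 * scalar form requires immh<3> to be set, so the element size here is
 * always 64 bits.
 */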
6984 bool accumulate = false;
6986 bool insert = false;
6991 if (!extract32(immh, 3, 1)) {
6992 unallocated_encoding(s);
6996 if (!fp_access_check(s)) {
7001 case 0x02: /* SSRA / USRA (accumulate) */
7004 case 0x04: /* SRSHR / URSHR (rounding) */
7007 case 0x06: /* SRSRA / URSRA (accum + rounding) */
7008 accumulate = round = true;
7010 case 0x08: /* SRI */
7016 uint64_t round_const = 1ULL << (shift - 1);
7017 tcg_round = tcg_const_i64(round_const);
7022 tcg_rn = read_fp_dreg(s, rn);
7023 tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7026 /* shift count same as element size is valid but does nothing;
7027 * special case to avoid potential shift by 64.
7029 int esize = 8 << size;
7030 if (shift != esize) {
7031 tcg_gen_shri_i64(tcg_rn, tcg_rn, shift);
7032 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
7035 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
7036 accumulate, is_u, size, shift);
7039 write_fp_dreg(s, rd, tcg_rd);
7041 tcg_temp_free_i64(tcg_rn);
7042 tcg_temp_free_i64(tcg_rd);
7044 tcg_temp_free_i64(tcg_round);
7048 /* SHL/SLI - Scalar shift left */
7049 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
7050 int immh, int immb, int opcode,
7053 int size = 32 - clz32(immh) - 1;
7054 int immhb = immh << 3 | immb;
7055 int shift = immhb - (8 << size);
7056 TCGv_i64 tcg_rn = new_tmp_a64(s);
7057 TCGv_i64 tcg_rd = new_tmp_a64(s);
7059 if (!extract32(immh, 3, 1)) {
7060 unallocated_encoding(s);
7064 if (!fp_access_check(s)) {
7068 tcg_rn = read_fp_dreg(s, rn);
7069 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
7072 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, shift, 64 - shift);
7074 tcg_gen_shli_i64(tcg_rd, tcg_rn, shift);
7077 write_fp_dreg(s, rd, tcg_rd);
7079 tcg_temp_free_i64(tcg_rn);
7080 tcg_temp_free_i64(tcg_rd);
7083 /* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
7084 * (signed/unsigned) narrowing */
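/* Illustrative decode: immh == 0b0010, immb == 0b001 gives size == 1
 * (16-bit results), immhb == 17 and shift == 2 * 16 - 17 == 15, i.e.
 * each 32-bit source element is shifted right by 15 and then saturated
 * down to 16 bits.
 */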
7085 static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
7086 bool is_u_shift, bool is_u_narrow,
7087 int immh, int immb, int opcode,
7090 int immhb = immh << 3 | immb;
7091 int size = 32 - clz32(immh) - 1;
7092 int esize = 8 << size;
7093 int shift = (2 * esize) - immhb;
7094 int elements = is_scalar ? 1 : (64 / esize);
7095 bool round = extract32(opcode, 0, 1);
7096 TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
7097 TCGv_i64 tcg_rn, tcg_rd, tcg_round;
7098 TCGv_i32 tcg_rd_narrowed;
7101 static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
7102 { gen_helper_neon_narrow_sat_s8,
7103 gen_helper_neon_unarrow_sat8 },
7104 { gen_helper_neon_narrow_sat_s16,
7105 gen_helper_neon_unarrow_sat16 },
7106 { gen_helper_neon_narrow_sat_s32,
7107 gen_helper_neon_unarrow_sat32 },
7110 static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
7111 gen_helper_neon_narrow_sat_u8,
7112 gen_helper_neon_narrow_sat_u16,
7113 gen_helper_neon_narrow_sat_u32,
7116 NeonGenNarrowEnvFn *narrowfn;
7122 if (extract32(immh, 3, 1)) {
7123 unallocated_encoding(s);
7127 if (!fp_access_check(s)) {
7132 narrowfn = unsigned_narrow_fns[size];
7134 narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
7137 tcg_rn = tcg_temp_new_i64();
7138 tcg_rd = tcg_temp_new_i64();
7139 tcg_rd_narrowed = tcg_temp_new_i32();
7140 tcg_final = tcg_const_i64(0);
7143 uint64_t round_const = 1ULL << (shift - 1);
7144 tcg_round = tcg_const_i64(round_const);
7149 for (i = 0; i < elements; i++) {
7150 read_vec_element(s, tcg_rn, rn, i, ldop);
7151 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
7152 false, is_u_shift, size+1, shift);
7153 narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
7154 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
7155 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
7159 write_vec_element(s, tcg_final, rd, 0, MO_64);
7161 write_vec_element(s, tcg_final, rd, 1, MO_64);
7165 tcg_temp_free_i64(tcg_round);
7167 tcg_temp_free_i64(tcg_rn);
7168 tcg_temp_free_i64(tcg_rd);
7169 tcg_temp_free_i32(tcg_rd_narrowed);
7170 tcg_temp_free_i64(tcg_final);
7172 clear_vec_high(s, is_q, rd);
7175 /* SQSHLU, UQSHL, SQSHL: saturating left shifts */
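/* The (src_unsigned, dst_unsigned) flag pairs map onto the instructions
 * as (0,0) -> SQSHL, (0,1) -> SQSHLU and (1,1) -> UQSHL; there is no
 * unsigned-source/signed-destination form, which is why the helper
 * tables below keep a NULL in that slot.
 */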
7176 static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
7177 bool src_unsigned, bool dst_unsigned,
7178 int immh, int immb, int rn, int rd)
7180 int immhb = immh << 3 | immb;
7181 int size = 32 - clz32(immh) - 1;
7182 int shift = immhb - (8 << size);
7186 assert(!(scalar && is_q));
7189 if (!is_q && extract32(immh, 3, 1)) {
7190 unallocated_encoding(s);
7194 /* Since we use the variable-shift helpers we must
7195 * replicate the shift count into each element of
7196 * the tcg_shift value.
7200 shift |= shift << 8;
7203 shift |= shift << 16;
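        /* e.g. for MO_8 with shift == 3 this builds
         * 0x03 -> 0x0303 -> 0x03030303, so every byte lane of tcg_shift
         * carries the same count.
         */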
7209 g_assert_not_reached();
7213 if (!fp_access_check(s)) {
7218 TCGv_i64 tcg_shift = tcg_const_i64(shift);
7219 static NeonGenTwo64OpEnvFn * const fns[2][2] = {
7220 { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
7221 { NULL, gen_helper_neon_qshl_u64 },
7223 NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
7224 int maxpass = is_q ? 2 : 1;
7226 for (pass = 0; pass < maxpass; pass++) {
7227 TCGv_i64 tcg_op = tcg_temp_new_i64();
7229 read_vec_element(s, tcg_op, rn, pass, MO_64);
7230 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
7231 write_vec_element(s, tcg_op, rd, pass, MO_64);
7233 tcg_temp_free_i64(tcg_op);
7235 tcg_temp_free_i64(tcg_shift);
7236 clear_vec_high(s, is_q, rd);
7238 TCGv_i32 tcg_shift = tcg_const_i32(shift);
7239 static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
7241 { gen_helper_neon_qshl_s8,
7242 gen_helper_neon_qshl_s16,
7243 gen_helper_neon_qshl_s32 },
7244 { gen_helper_neon_qshlu_s8,
7245 gen_helper_neon_qshlu_s16,
7246 gen_helper_neon_qshlu_s32 }
7248 { NULL, NULL, NULL },
7249 { gen_helper_neon_qshl_u8,
7250 gen_helper_neon_qshl_u16,
7251 gen_helper_neon_qshl_u32 }
7254 NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
7255 TCGMemOp memop = scalar ? size : MO_32;
7256 int maxpass = scalar ? 1 : is_q ? 4 : 2;
7258 for (pass = 0; pass < maxpass; pass++) {
7259 TCGv_i32 tcg_op = tcg_temp_new_i32();
7261 read_vec_element_i32(s, tcg_op, rn, pass, memop);
7262 genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
7266 tcg_gen_ext8u_i32(tcg_op, tcg_op);
7269 tcg_gen_ext16u_i32(tcg_op, tcg_op);
7274 g_assert_not_reached();
7276 write_fp_sreg(s, rd, tcg_op);
7278 write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
7281 tcg_temp_free_i32(tcg_op);
7283 tcg_temp_free_i32(tcg_shift);
7286 clear_vec_high(s, is_q, rd);
7291 /* Common vector code for handling integer to FP conversion */
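/* Illustrative example: with is_signed set and fracbits == 4, an integer
 * element of 32 converts to 32 / 2^4 == 2.0; fracbits == 0 is the plain
 * SCVTF/UCVTF case.
 */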
7292 static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
7293 int elements, int is_signed,
7294 int fracbits, int size)
7296 TCGv_ptr tcg_fpst = get_fpstatus_ptr(size == MO_16);
7297 TCGv_i32 tcg_shift = NULL;
7299 TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
7302 if (fracbits || size == MO_64) {
7303 tcg_shift = tcg_const_i32(fracbits);
7306 if (size == MO_64) {
7307 TCGv_i64 tcg_int64 = tcg_temp_new_i64();
7308 TCGv_i64 tcg_double = tcg_temp_new_i64();
7310 for (pass = 0; pass < elements; pass++) {
7311 read_vec_element(s, tcg_int64, rn, pass, mop);
7314 gen_helper_vfp_sqtod(tcg_double, tcg_int64,
7315 tcg_shift, tcg_fpst);
7317 gen_helper_vfp_uqtod(tcg_double, tcg_int64,
7318 tcg_shift, tcg_fpst);
7320 if (elements == 1) {
7321 write_fp_dreg(s, rd, tcg_double);
7323 write_vec_element(s, tcg_double, rd, pass, MO_64);
7327 tcg_temp_free_i64(tcg_int64);
7328 tcg_temp_free_i64(tcg_double);
7331 TCGv_i32 tcg_int32 = tcg_temp_new_i32();
7332 TCGv_i32 tcg_float = tcg_temp_new_i32();
7334 for (pass = 0; pass < elements; pass++) {
7335 read_vec_element_i32(s, tcg_int32, rn, pass, mop);
7341 gen_helper_vfp_sltos(tcg_float, tcg_int32,
7342 tcg_shift, tcg_fpst);
7344 gen_helper_vfp_ultos(tcg_float, tcg_int32,
7345 tcg_shift, tcg_fpst);
7349 gen_helper_vfp_sitos(tcg_float, tcg_int32, tcg_fpst);
7351 gen_helper_vfp_uitos(tcg_float, tcg_int32, tcg_fpst);
7358 gen_helper_vfp_sltoh(tcg_float, tcg_int32,
7359 tcg_shift, tcg_fpst);
7361 gen_helper_vfp_ultoh(tcg_float, tcg_int32,
7362 tcg_shift, tcg_fpst);
7366 gen_helper_vfp_sitoh(tcg_float, tcg_int32, tcg_fpst);
7368 gen_helper_vfp_uitoh(tcg_float, tcg_int32, tcg_fpst);
7373 g_assert_not_reached();
7376 if (elements == 1) {
7377 write_fp_sreg(s, rd, tcg_float);
7379 write_vec_element_i32(s, tcg_float, rd, pass, size);
7383 tcg_temp_free_i32(tcg_int32);
7384 tcg_temp_free_i32(tcg_float);
7387 tcg_temp_free_ptr(tcg_fpst);
7389 tcg_temp_free_i32(tcg_shift);
7392 clear_vec_high(s, elements << size == 16, rd);
7395 /* UCVTF/SCVTF - Integer to FP conversion */
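/* Illustrative decode: immh == 0b0101, immb == 0b011 selects MO_32 via
 * immh<2>, with immhb == 43 and fracbits == (16 << MO_32) - 43 == 21.
 */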
7396 static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
7397 bool is_q, bool is_u,
7398 int immh, int immb, int opcode,
7401 int size, elements, fracbits;
7402 int immhb = immh << 3 | immb;
7406 if (!is_scalar && !is_q) {
7407 unallocated_encoding(s);
7410 } else if (immh & 4) {
7412 } else if (immh & 2) {
7414 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7415 unallocated_encoding(s);
7419 /* immh == 0 would be a failure of the decode logic */
7420 g_assert(immh == 1);
7421 unallocated_encoding(s);
7428 elements = (8 << is_q) >> size;
7430 fracbits = (16 << size) - immhb;
7432 if (!fp_access_check(s)) {
7436 handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
7439 /* FCVTZS, FCVTZU - FP to fixed-point conversion */
7440 static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
7441 bool is_q, bool is_u,
7442 int immh, int immb, int rn, int rd)
7444 int immhb = immh << 3 | immb;
7445 int pass, size, fracbits;
7446 TCGv_ptr tcg_fpstatus;
7447 TCGv_i32 tcg_rmode, tcg_shift;
7451 if (!is_scalar && !is_q) {
7452 unallocated_encoding(s);
7455 } else if (immh & 0x4) {
7457 } else if (immh & 0x2) {
7459 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
7460 unallocated_encoding(s);
7464 /* Should have split out AdvSIMD modified immediate earlier. */
7466 unallocated_encoding(s);
7470 if (!fp_access_check(s)) {
7474 assert(!(is_scalar && is_q));
7476 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
7477 tcg_fpstatus = get_fpstatus_ptr(size == MO_16);
7478 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7479 fracbits = (16 << size) - immhb;
7480 tcg_shift = tcg_const_i32(fracbits);
7482 if (size == MO_64) {
7483 int maxpass = is_scalar ? 1 : 2;
7485 for (pass = 0; pass < maxpass; pass++) {
7486 TCGv_i64 tcg_op = tcg_temp_new_i64();
7488 read_vec_element(s, tcg_op, rn, pass, MO_64);
7490 gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7492 gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7494 write_vec_element(s, tcg_op, rd, pass, MO_64);
7495 tcg_temp_free_i64(tcg_op);
7497 clear_vec_high(s, is_q, rd);
7499 void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
7500 int maxpass = is_scalar ? 1 : ((8 << is_q) >> size);
7505 fn = gen_helper_vfp_touhh;
7507 fn = gen_helper_vfp_toshh;
7512 fn = gen_helper_vfp_touls;
7514 fn = gen_helper_vfp_tosls;
7518 g_assert_not_reached();
7521 for (pass = 0; pass < maxpass; pass++) {
7522 TCGv_i32 tcg_op = tcg_temp_new_i32();
7524 read_vec_element_i32(s, tcg_op, rn, pass, size);
7525 fn(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
7527 write_fp_sreg(s, rd, tcg_op);
7529 write_vec_element_i32(s, tcg_op, rd, pass, size);
7531 tcg_temp_free_i32(tcg_op);
7534 clear_vec_high(s, is_q, rd);
7538 tcg_temp_free_ptr(tcg_fpstatus);
7539 tcg_temp_free_i32(tcg_shift);
7540 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
7541 tcg_temp_free_i32(tcg_rmode);
7544 /* AdvSIMD scalar shift by immediate
7545 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
7546 * +-----+---+-------------+------+------+--------+---+------+------+
7547 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
7548 * +-----+---+-------------+------+------+--------+---+------+------+
7550  * This is the scalar version, so it works on fixed-size registers
7552 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
7554 int rd = extract32(insn, 0, 5);
7555 int rn = extract32(insn, 5, 5);
7556 int opcode = extract32(insn, 11, 5);
7557 int immb = extract32(insn, 16, 3);
7558 int immh = extract32(insn, 19, 4);
7559 bool is_u = extract32(insn, 29, 1);
7562 unallocated_encoding(s);
7567 case 0x08: /* SRI */
7569 unallocated_encoding(s);
7573 case 0x00: /* SSHR / USHR */
7574 case 0x02: /* SSRA / USRA */
7575 case 0x04: /* SRSHR / URSHR */
7576 case 0x06: /* SRSRA / URSRA */
7577 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
7579 case 0x0a: /* SHL / SLI */
7580 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
7582 case 0x1c: /* SCVTF, UCVTF */
7583 handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
7586 case 0x10: /* SQSHRUN, SQSHRUN2 */
7587 case 0x11: /* SQRSHRUN, SQRSHRUN2 */
7589 unallocated_encoding(s);
7592 handle_vec_simd_sqshrn(s, true, false, false, true,
7593 immh, immb, opcode, rn, rd);
7595 case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
7596 case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
7597 handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
7598 immh, immb, opcode, rn, rd);
7600 case 0xc: /* SQSHLU */
7602 unallocated_encoding(s);
7605 handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
7607 case 0xe: /* SQSHL, UQSHL */
7608 handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
7610 case 0x1f: /* FCVTZS, FCVTZU */
7611 handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
7614 unallocated_encoding(s);
7619 /* AdvSIMD scalar three different
7620 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
7621 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7622 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
7623 * +-----+---+-----------+------+---+------+--------+-----+------+------+
7625 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
7627 bool is_u = extract32(insn, 29, 1);
7628 int size = extract32(insn, 22, 2);
7629 int opcode = extract32(insn, 12, 4);
7630 int rm = extract32(insn, 16, 5);
7631 int rn = extract32(insn, 5, 5);
7632 int rd = extract32(insn, 0, 5);
7635 unallocated_encoding(s);
7640 case 0x9: /* SQDMLAL, SQDMLAL2 */
7641 case 0xb: /* SQDMLSL, SQDMLSL2 */
7642 case 0xd: /* SQDMULL, SQDMULL2 */
7643 if (size == 0 || size == 3) {
7644 unallocated_encoding(s);
7649 unallocated_encoding(s);
7653 if (!fp_access_check(s)) {
7658 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7659 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7660 TCGv_i64 tcg_res = tcg_temp_new_i64();
7662 read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
7663 read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
7665 tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
7666 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
7669 case 0xd: /* SQDMULL, SQDMULL2 */
7671 case 0xb: /* SQDMLSL, SQDMLSL2 */
7672 tcg_gen_neg_i64(tcg_res, tcg_res);
7674 case 0x9: /* SQDMLAL, SQDMLAL2 */
7675 read_vec_element(s, tcg_op1, rd, 0, MO_64);
7676 gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
7680 g_assert_not_reached();
7683 write_fp_dreg(s, rd, tcg_res);
7685 tcg_temp_free_i64(tcg_op1);
7686 tcg_temp_free_i64(tcg_op2);
7687 tcg_temp_free_i64(tcg_res);
7689 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7690 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7691 TCGv_i64 tcg_res = tcg_temp_new_i64();
7693 read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
7694 read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
7696 gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
7697 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
7700 case 0xd: /* SQDMULL, SQDMULL2 */
7702 case 0xb: /* SQDMLSL, SQDMLSL2 */
7703 gen_helper_neon_negl_u32(tcg_res, tcg_res);
7705 case 0x9: /* SQDMLAL, SQDMLAL2 */
7707 TCGv_i64 tcg_op3 = tcg_temp_new_i64();
7708 read_vec_element(s, tcg_op3, rd, 0, MO_32);
7709 gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
7711 tcg_temp_free_i64(tcg_op3);
7715 g_assert_not_reached();
7718 tcg_gen_ext32u_i64(tcg_res, tcg_res);
7719 write_fp_dreg(s, rd, tcg_res);
7721 tcg_temp_free_i32(tcg_op1);
7722 tcg_temp_free_i32(tcg_op2);
7723 tcg_temp_free_i64(tcg_res);
7727 /* CMTST : test is "(X & Y) != 0". */
7728 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
7730 tcg_gen_and_i32(d, a, b);
7731 tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
7732 tcg_gen_neg_i32(d, d);
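/* e.g. a == 0x10, b == 0x30: a & b == 0x10, so setcond(NE) produces 1
 * and the negation widens it to the all-ones result 0xffffffff.
 */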
7735 static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
7737 tcg_gen_and_i64(d, a, b);
7738 tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
7739 tcg_gen_neg_i64(d, d);
7742 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
7744 tcg_gen_and_vec(vece, d, a, b);
7745 tcg_gen_dupi_vec(vece, a, 0);
7746 tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
7749 static void handle_3same_64(DisasContext *s, int opcode, bool u,
7750 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
7752 /* Handle 64x64->64 opcodes which are shared between the scalar
7753 * and vector 3-same groups. We cover every opcode where size == 3
7754 * is valid in either the three-reg-same (integer, not pairwise)
7755 * or scalar-three-reg-same groups.
7760 case 0x1: /* SQADD */
7762 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7764 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7767 case 0x5: /* SQSUB */
7769 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7771 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7774 case 0x6: /* CMGT, CMHI */
7775 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
7776 * We implement this using setcond (test) and then negating.
7778 cond = u ? TCG_COND_GTU : TCG_COND_GT;
7780 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
7781 tcg_gen_neg_i64(tcg_rd, tcg_rd);
7783 case 0x7: /* CMGE, CMHS */
7784 cond = u ? TCG_COND_GEU : TCG_COND_GE;
7786 case 0x11: /* CMTST, CMEQ */
7791 gen_cmtst_i64(tcg_rd, tcg_rn, tcg_rm);
7793 case 0x8: /* SSHL, USHL */
7795 gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
7797 gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
7800 case 0x9: /* SQSHL, UQSHL */
7802 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7804 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7807 case 0xa: /* SRSHL, URSHL */
7809 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
7811 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
7814 case 0xb: /* SQRSHL, UQRSHL */
7816 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7818 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
7821 case 0x10: /* ADD, SUB */
7823 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
7825 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
7829 g_assert_not_reached();
7833 /* Handle the 3-same-operands float operations; shared by the scalar
7834 * and vector encodings. The caller must filter out any encodings
7835 * not allocated for the encoding it is dealing with.
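/* Note how FMLS is handled below: rather than computing rd - rn * rm with
 * a separate (twice-rounded) subtraction, the first operand is negated and
 * the code falls through into the fused muladd path, preserving the
 * single-rounding semantics the architecture requires.
 */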
7837 static void handle_3same_float(DisasContext *s, int size, int elements,
7838 int fpopcode, int rd, int rn, int rm)
7841 TCGv_ptr fpst = get_fpstatus_ptr(false);
7843 for (pass = 0; pass < elements; pass++) {
7846 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
7847 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
7848 TCGv_i64 tcg_res = tcg_temp_new_i64();
7850 read_vec_element(s, tcg_op1, rn, pass, MO_64);
7851 read_vec_element(s, tcg_op2, rm, pass, MO_64);
7854 case 0x39: /* FMLS */
7855 /* As usual for ARM, separate negation for fused multiply-add */
7856 gen_helper_vfp_negd(tcg_op1, tcg_op1);
7858 case 0x19: /* FMLA */
7859 read_vec_element(s, tcg_res, rd, pass, MO_64);
7860 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
7863 case 0x18: /* FMAXNM */
7864 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7866 case 0x1a: /* FADD */
7867 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
7869 case 0x1b: /* FMULX */
7870 gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
7872 case 0x1c: /* FCMEQ */
7873 gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7875 case 0x1e: /* FMAX */
7876 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
7878 case 0x1f: /* FRECPS */
7879 gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7881 case 0x38: /* FMINNM */
7882 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
7884 case 0x3a: /* FSUB */
7885 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7887 case 0x3e: /* FMIN */
7888 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
7890 case 0x3f: /* FRSQRTS */
7891 gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7893 case 0x5b: /* FMUL */
7894 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
7896 case 0x5c: /* FCMGE */
7897 gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7899 case 0x5d: /* FACGE */
7900 gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7902 case 0x5f: /* FDIV */
7903 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
7905 case 0x7a: /* FABD */
7906 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
7907 gen_helper_vfp_absd(tcg_res, tcg_res);
7909 case 0x7c: /* FCMGT */
7910 gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7912 case 0x7d: /* FACGT */
7913 gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
7916 g_assert_not_reached();
7919 write_vec_element(s, tcg_res, rd, pass, MO_64);
7921 tcg_temp_free_i64(tcg_res);
7922 tcg_temp_free_i64(tcg_op1);
7923 tcg_temp_free_i64(tcg_op2);
7926 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
7927 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
7928 TCGv_i32 tcg_res = tcg_temp_new_i32();
7930 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
7931 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
7934 case 0x39: /* FMLS */
7935 /* As usual for ARM, separate negation for fused multiply-add */
7936 gen_helper_vfp_negs(tcg_op1, tcg_op1);
7938 case 0x19: /* FMLA */
7939 read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
7940 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
7943 case 0x1a: /* FADD */
7944 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
7946 case 0x1b: /* FMULX */
7947 gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
7949 case 0x1c: /* FCMEQ */
7950 gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7952 case 0x1e: /* FMAX */
7953 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
7955 case 0x1f: /* FRECPS */
7956 gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7958 case 0x18: /* FMAXNM */
7959 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
7961 case 0x38: /* FMINNM */
7962 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
7964 case 0x3a: /* FSUB */
7965 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7967 case 0x3e: /* FMIN */
7968 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
7970 case 0x3f: /* FRSQRTS */
7971 gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7973 case 0x5b: /* FMUL */
7974 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
7976 case 0x5c: /* FCMGE */
7977 gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7979 case 0x5d: /* FACGE */
7980 gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7982 case 0x5f: /* FDIV */
7983 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
7985 case 0x7a: /* FABD */
7986 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
7987 gen_helper_vfp_abss(tcg_res, tcg_res);
7989 case 0x7c: /* FCMGT */
7990 gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7992 case 0x7d: /* FACGT */
7993 gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
7996 g_assert_not_reached();
7999 if (elements == 1) {
8000 /* scalar single so clear high part */
8001 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
8003 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
8004 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
8005 tcg_temp_free_i64(tcg_tmp);
8007 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8010 tcg_temp_free_i32(tcg_res);
8011 tcg_temp_free_i32(tcg_op1);
8012 tcg_temp_free_i32(tcg_op2);
8016 tcg_temp_free_ptr(fpst);
8018 clear_vec_high(s, elements * (size ? 8 : 4) > 8, rd);
8021 /* AdvSIMD scalar three same
8022 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
8023 * +-----+---+-----------+------+---+------+--------+---+------+------+
8024 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
8025 * +-----+---+-----------+------+---+------+--------+---+------+------+
8027 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
8029 int rd = extract32(insn, 0, 5);
8030 int rn = extract32(insn, 5, 5);
8031 int opcode = extract32(insn, 11, 5);
8032 int rm = extract32(insn, 16, 5);
8033 int size = extract32(insn, 22, 2);
8034 bool u = extract32(insn, 29, 1);
8037 if (opcode >= 0x18) {
8038 /* Floating point: U, size[1] and opcode indicate operation */
8039 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
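        /* e.g. FABD: opcode == 0x1a with size<1> == 1 and U == 1 gives
         * fpopcode == 0x1a | 0x20 | 0x40 == 0x7a below.
         */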
8041 case 0x1b: /* FMULX */
8042 case 0x1f: /* FRECPS */
8043 case 0x3f: /* FRSQRTS */
8044 case 0x5d: /* FACGE */
8045 case 0x7d: /* FACGT */
8046 case 0x1c: /* FCMEQ */
8047 case 0x5c: /* FCMGE */
8048 case 0x7c: /* FCMGT */
8049 case 0x7a: /* FABD */
8052 unallocated_encoding(s);
8056 if (!fp_access_check(s)) {
8060 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
8065 case 0x1: /* SQADD, UQADD */
8066 case 0x5: /* SQSUB, UQSUB */
8067 case 0x9: /* SQSHL, UQSHL */
8068 case 0xb: /* SQRSHL, UQRSHL */
8070 case 0x8: /* SSHL, USHL */
8071 case 0xa: /* SRSHL, URSHL */
8072 case 0x6: /* CMGT, CMHI */
8073 case 0x7: /* CMGE, CMHS */
8074 case 0x11: /* CMTST, CMEQ */
8075 case 0x10: /* ADD, SUB (vector) */
8077 unallocated_encoding(s);
8081 case 0x16: /* SQDMULH, SQRDMULH (vector) */
8082 if (size != 1 && size != 2) {
8083 unallocated_encoding(s);
8088 unallocated_encoding(s);
8092 if (!fp_access_check(s)) {
8096 tcg_rd = tcg_temp_new_i64();
8099 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
8100 TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
8102 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
8103 tcg_temp_free_i64(tcg_rn);
8104 tcg_temp_free_i64(tcg_rm);
8106 /* Do a single operation on the lowest element in the vector.
8107 * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
8108 * no side effects for all these operations.
8109 * OPTME: special-purpose helpers would avoid doing some
8110 * unnecessary work in the helper for the 8 and 16 bit cases.
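        /* Concretely: the 8/16-bit helpers operate on all lanes of the
         * 32-bit value, but the unused upper lanes were read as zero and
         * 0 OP 0 == 0 keeps them zero, so the extu_i32_i64 at the end
         * stores a correctly zero-extended result.
         */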
8112 NeonGenTwoOpEnvFn *genenvfn;
8113 TCGv_i32 tcg_rn = tcg_temp_new_i32();
8114 TCGv_i32 tcg_rm = tcg_temp_new_i32();
8115 TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
8117 read_vec_element_i32(s, tcg_rn, rn, 0, size);
8118 read_vec_element_i32(s, tcg_rm, rm, 0, size);
8121 case 0x1: /* SQADD, UQADD */
8123 static NeonGenTwoOpEnvFn * const fns[3][2] = {
8124 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
8125 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
8126 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
8128 genenvfn = fns[size][u];
8131 case 0x5: /* SQSUB, UQSUB */
8133 static NeonGenTwoOpEnvFn * const fns[3][2] = {
8134 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
8135 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
8136 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
8138 genenvfn = fns[size][u];
8141 case 0x9: /* SQSHL, UQSHL */
8143 static NeonGenTwoOpEnvFn * const fns[3][2] = {
8144 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
8145 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
8146 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
8148 genenvfn = fns[size][u];
8151 case 0xb: /* SQRSHL, UQRSHL */
8153 static NeonGenTwoOpEnvFn * const fns[3][2] = {
8154 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
8155 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
8156 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
8158 genenvfn = fns[size][u];
8161 case 0x16: /* SQDMULH, SQRDMULH */
8163 static NeonGenTwoOpEnvFn * const fns[2][2] = {
8164 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
8165 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
8167 assert(size == 1 || size == 2);
8168 genenvfn = fns[size - 1][u];
8172 g_assert_not_reached();
8175 genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
8176 tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
8177 tcg_temp_free_i32(tcg_rd32);
8178 tcg_temp_free_i32(tcg_rn);
8179 tcg_temp_free_i32(tcg_rm);
8182 write_fp_dreg(s, rd, tcg_rd);
8184 tcg_temp_free_i64(tcg_rd);
8187 /* AdvSIMD scalar three same FP16
8188 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
8189 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
8190 * | 0 1 | U | 1 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
8191 * +-----+---+-----------+---+-----+------+-----+--------+---+----+----+
8192 * v: 0101 1110 0100 0000 0000 0100 0000 0000 => 5e400400
8193 * m: 1101 1111 0110 0000 1100 0100 0000 0000 => df60c400
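/* Illustrative decode: FABD has opcode == 0b010 with a == 1 and U == 1,
 * giving fpopcode == 2 | (1 << 3) | (1 << 4) == 0x1a, matching the case
 * label below.
 */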
8195 static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
8198 int rd = extract32(insn, 0, 5);
8199 int rn = extract32(insn, 5, 5);
8200 int opcode = extract32(insn, 11, 3);
8201 int rm = extract32(insn, 16, 5);
8202 bool u = extract32(insn, 29, 1);
8203 bool a = extract32(insn, 23, 1);
8204 int fpopcode = opcode | (a << 3) | (u << 4);
8211 case 0x03: /* FMULX */
8212 case 0x04: /* FCMEQ (reg) */
8213 case 0x07: /* FRECPS */
8214 case 0x0f: /* FRSQRTS */
8215 case 0x14: /* FCMGE (reg) */
8216 case 0x15: /* FACGE */
8217 case 0x1a: /* FABD */
8218 case 0x1c: /* FCMGT (reg) */
8219 case 0x1d: /* FACGT */
8222 unallocated_encoding(s);
8226 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
8227 unallocated_encoding(s);
8230 if (!fp_access_check(s)) {
8234 fpst = get_fpstatus_ptr(true);
8236 tcg_op1 = tcg_temp_new_i32();
8237 tcg_op2 = tcg_temp_new_i32();
8238 tcg_res = tcg_temp_new_i32();
8240 read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
8241 read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
8244 case 0x03: /* FMULX */
8245 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
8247 case 0x04: /* FCMEQ (reg) */
8248 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8250 case 0x07: /* FRECPS */
8251 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8253 case 0x0f: /* FRSQRTS */
8254 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8256 case 0x14: /* FCMGE (reg) */
8257 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8259 case 0x15: /* FACGE */
8260 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8262 case 0x1a: /* FABD */
8263 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
8264 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
8266 case 0x1c: /* FCMGT (reg) */
8267 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8269 case 0x1d: /* FACGT */
8270 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
8273 g_assert_not_reached();
8276 write_fp_sreg(s, rd, tcg_res);
8279 tcg_temp_free_i32(tcg_res);
8280 tcg_temp_free_i32(tcg_op1);
8281 tcg_temp_free_i32(tcg_op2);
8282 tcg_temp_free_ptr(fpst);
8285 /* AdvSIMD scalar three same extra
8286 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
8287 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
8288 * | 0 1 | U | 1 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
8289 * +-----+---+-----------+------+---+------+---+--------+---+----+----+
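/* Illustrative decode: SQRDMLAH requires U == 1 with opcode == 0, so the
 * switch below dispatches on u * 16 + opcode == 0x10.
 */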
8291 static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
8294 int rd = extract32(insn, 0, 5);
8295 int rn = extract32(insn, 5, 5);
8296 int opcode = extract32(insn, 11, 4);
8297 int rm = extract32(insn, 16, 5);
8298 int size = extract32(insn, 22, 2);
8299 bool u = extract32(insn, 29, 1);
8300 TCGv_i32 ele1, ele2, ele3;
8304 switch (u * 16 + opcode) {
8305 case 0x10: /* SQRDMLAH (vector) */
8306 case 0x11: /* SQRDMLSH (vector) */
8307 if (size != 1 && size != 2) {
8308 unallocated_encoding(s);
8311 feature = ARM_FEATURE_V8_RDM;
8314 unallocated_encoding(s);
8317 if (!arm_dc_feature(s, feature)) {
8318 unallocated_encoding(s);
8321 if (!fp_access_check(s)) {
8325 /* Do a single operation on the lowest element in the vector.
8326 * We use the standard Neon helpers and rely on 0 OP 0 == 0
8327 * with no side effects for all these operations.
8328 * OPTME: special-purpose helpers would avoid doing some
8329 * unnecessary work in the helper for the 16 bit cases.
8331 ele1 = tcg_temp_new_i32();
8332 ele2 = tcg_temp_new_i32();
8333 ele3 = tcg_temp_new_i32();
8335 read_vec_element_i32(s, ele1, rn, 0, size);
8336 read_vec_element_i32(s, ele2, rm, 0, size);
8337 read_vec_element_i32(s, ele3, rd, 0, size);
8340 case 0x0: /* SQRDMLAH */
8342 gen_helper_neon_qrdmlah_s16(ele3, cpu_env, ele1, ele2, ele3);
8344 gen_helper_neon_qrdmlah_s32(ele3, cpu_env, ele1, ele2, ele3);
8347 case 0x1: /* SQRDMLSH */
8349 gen_helper_neon_qrdmlsh_s16(ele3, cpu_env, ele1, ele2, ele3);
8351 gen_helper_neon_qrdmlsh_s32(ele3, cpu_env, ele1, ele2, ele3);
8355 g_assert_not_reached();
8357 tcg_temp_free_i32(ele1);
8358 tcg_temp_free_i32(ele2);
8360 res = tcg_temp_new_i64();
8361 tcg_gen_extu_i32_i64(res, ele3);
8362 tcg_temp_free_i32(ele3);
8364 write_fp_dreg(s, rd, res);
8365 tcg_temp_free_i64(res);
8368 static void handle_2misc_64(DisasContext *s, int opcode, bool u,
8369 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
8370 TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
8372 /* Handle 64->64 opcodes which are shared between the scalar and
8373 * vector 2-reg-misc groups. We cover every integer opcode where size == 3
8374 * is valid in either group and also the double-precision fp ops.
8375  * The caller need only provide tcg_rmode and tcg_fpstatus if the op
8381 case 0x4: /* CLS, CLZ */
8383 tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
8385 tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
8389 /* This opcode is shared with CNT and RBIT but we have earlier
8390 * enforced that size == 3 if and only if this is the NOT insn.
8392 tcg_gen_not_i64(tcg_rd, tcg_rn);
8394 case 0x7: /* SQABS, SQNEG */
8396 gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
8398 gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
8401 case 0xa: /* CMLT */
8402         /* 64 bit integer comparison against zero, result is
8403          * test ? (2^64 - 1) : 0, implemented via setcond(test) then negating. */
8408 tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
8409 tcg_gen_neg_i64(tcg_rd, tcg_rd);
8411 case 0x8: /* CMGT, CMGE */
8412 cond = u ? TCG_COND_GE : TCG_COND_GT;
8414 case 0x9: /* CMEQ, CMLE */
8415 cond = u ? TCG_COND_LE : TCG_COND_EQ;
8417 case 0xb: /* ABS, NEG */
8419 tcg_gen_neg_i64(tcg_rd, tcg_rn);
8421 TCGv_i64 tcg_zero = tcg_const_i64(0);
8422 tcg_gen_neg_i64(tcg_rd, tcg_rn);
8423 tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
8425 tcg_temp_free_i64(tcg_zero);
8428 case 0x2f: /* FABS */
8429 gen_helper_vfp_absd(tcg_rd, tcg_rn);
8431 case 0x6f: /* FNEG */
8432 gen_helper_vfp_negd(tcg_rd, tcg_rn);
8434 case 0x7f: /* FSQRT */
8435 gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
8437 case 0x1a: /* FCVTNS */
8438 case 0x1b: /* FCVTMS */
8439 case 0x1c: /* FCVTAS */
8440 case 0x3a: /* FCVTPS */
8441 case 0x3b: /* FCVTZS */
8443 TCGv_i32 tcg_shift = tcg_const_i32(0);
8444 gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8445 tcg_temp_free_i32(tcg_shift);
8448 case 0x5a: /* FCVTNU */
8449 case 0x5b: /* FCVTMU */
8450 case 0x5c: /* FCVTAU */
8451 case 0x7a: /* FCVTPU */
8452 case 0x7b: /* FCVTZU */
8454 TCGv_i32 tcg_shift = tcg_const_i32(0);
8455 gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
8456 tcg_temp_free_i32(tcg_shift);
8459 case 0x18: /* FRINTN */
8460 case 0x19: /* FRINTM */
8461 case 0x38: /* FRINTP */
8462 case 0x39: /* FRINTZ */
8463 case 0x58: /* FRINTA */
8464 case 0x79: /* FRINTI */
8465 gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
8467 case 0x59: /* FRINTX */
8468 gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
8471 g_assert_not_reached();
8475 static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
8476 bool is_scalar, bool is_u, bool is_q,
8477 int size, int rn, int rd)
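    /* There are no helpers for the "less than" forms; FCMLT (zero) and
     * FCMLE (zero) are implemented by swapping the operands into the
     * FCMGT/FCMGE helpers, which is what the swapped genfn calls below do.
     */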
8479 bool is_double = (size == MO_64);
8482 if (!fp_access_check(s)) {
8486 fpst = get_fpstatus_ptr(size == MO_16);
8489 TCGv_i64 tcg_op = tcg_temp_new_i64();
8490 TCGv_i64 tcg_zero = tcg_const_i64(0);
8491 TCGv_i64 tcg_res = tcg_temp_new_i64();
8492 NeonGenTwoDoubleOPFn *genfn;
8497 case 0x2e: /* FCMLT (zero) */
8500 case 0x2c: /* FCMGT (zero) */
8501 genfn = gen_helper_neon_cgt_f64;
8503 case 0x2d: /* FCMEQ (zero) */
8504 genfn = gen_helper_neon_ceq_f64;
8506 case 0x6d: /* FCMLE (zero) */
8509 case 0x6c: /* FCMGE (zero) */
8510 genfn = gen_helper_neon_cge_f64;
8513 g_assert_not_reached();
8516 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8517 read_vec_element(s, tcg_op, rn, pass, MO_64);
8519 genfn(tcg_res, tcg_zero, tcg_op, fpst);
8521 genfn(tcg_res, tcg_op, tcg_zero, fpst);
8523 write_vec_element(s, tcg_res, rd, pass, MO_64);
8525 tcg_temp_free_i64(tcg_res);
8526 tcg_temp_free_i64(tcg_zero);
8527 tcg_temp_free_i64(tcg_op);
8529 clear_vec_high(s, !is_scalar, rd);
8531 TCGv_i32 tcg_op = tcg_temp_new_i32();
8532 TCGv_i32 tcg_zero = tcg_const_i32(0);
8533 TCGv_i32 tcg_res = tcg_temp_new_i32();
8534 NeonGenTwoSingleOPFn *genfn;
8536 int pass, maxpasses;
8538 if (size == MO_16) {
8540 case 0x2e: /* FCMLT (zero) */
8543 case 0x2c: /* FCMGT (zero) */
8544 genfn = gen_helper_advsimd_cgt_f16;
8546 case 0x2d: /* FCMEQ (zero) */
8547 genfn = gen_helper_advsimd_ceq_f16;
8549 case 0x6d: /* FCMLE (zero) */
8552 case 0x6c: /* FCMGE (zero) */
8553 genfn = gen_helper_advsimd_cge_f16;
8556 g_assert_not_reached();
8560 case 0x2e: /* FCMLT (zero) */
8563 case 0x2c: /* FCMGT (zero) */
8564 genfn = gen_helper_neon_cgt_f32;
8566 case 0x2d: /* FCMEQ (zero) */
8567 genfn = gen_helper_neon_ceq_f32;
8569 case 0x6d: /* FCMLE (zero) */
8572 case 0x6c: /* FCMGE (zero) */
8573 genfn = gen_helper_neon_cge_f32;
8576 g_assert_not_reached();
8583 int vector_size = 8 << is_q;
8584 maxpasses = vector_size >> size;
8587 for (pass = 0; pass < maxpasses; pass++) {
8588 read_vec_element_i32(s, tcg_op, rn, pass, size);
8590 genfn(tcg_res, tcg_zero, tcg_op, fpst);
8592 genfn(tcg_res, tcg_op, tcg_zero, fpst);
8595 write_fp_sreg(s, rd, tcg_res);
8597 write_vec_element_i32(s, tcg_res, rd, pass, size);
8600 tcg_temp_free_i32(tcg_res);
8601 tcg_temp_free_i32(tcg_zero);
8602 tcg_temp_free_i32(tcg_op);
8604 clear_vec_high(s, is_q, rd);
8608 tcg_temp_free_ptr(fpst);
8611 static void handle_2misc_reciprocal(DisasContext *s, int opcode,
8612 bool is_scalar, bool is_u, bool is_q,
8613 int size, int rn, int rd)
8615 bool is_double = (size == 3);
8616 TCGv_ptr fpst = get_fpstatus_ptr(false);
8619 TCGv_i64 tcg_op = tcg_temp_new_i64();
8620 TCGv_i64 tcg_res = tcg_temp_new_i64();
8623 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8624 read_vec_element(s, tcg_op, rn, pass, MO_64);
8626 case 0x3d: /* FRECPE */
8627 gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
8629 case 0x3f: /* FRECPX */
8630 gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
8632 case 0x7d: /* FRSQRTE */
8633 gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
8636 g_assert_not_reached();
8638 write_vec_element(s, tcg_res, rd, pass, MO_64);
8640 tcg_temp_free_i64(tcg_res);
8641 tcg_temp_free_i64(tcg_op);
8642 clear_vec_high(s, !is_scalar, rd);
8644 TCGv_i32 tcg_op = tcg_temp_new_i32();
8645 TCGv_i32 tcg_res = tcg_temp_new_i32();
8646 int pass, maxpasses;
8651 maxpasses = is_q ? 4 : 2;
8654 for (pass = 0; pass < maxpasses; pass++) {
8655 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
8658 case 0x3c: /* URECPE */
8659 gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
8661 case 0x3d: /* FRECPE */
8662 gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
8664 case 0x3f: /* FRECPX */
8665 gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
8667 case 0x7d: /* FRSQRTE */
8668 gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
8671 g_assert_not_reached();
8675 write_fp_sreg(s, rd, tcg_res);
8677 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
8680 tcg_temp_free_i32(tcg_res);
8681 tcg_temp_free_i32(tcg_op);
8683 clear_vec_high(s, is_q, rd);
8686 tcg_temp_free_ptr(fpst);
8689 static void handle_2misc_narrow(DisasContext *s, bool scalar,
8690 int opcode, bool u, bool is_q,
8691 int size, int rn, int rd)
8693 /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
8694 * in the source becomes a size element in the destination).
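/* For the "2" forms (is_q set, e.g. XTN2), destelt == 2 below, so the
 * narrowed results land in the upper half of Vd while the low 64 bits
 * are left intact.
 */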
8697 TCGv_i32 tcg_res[2];
8698 int destelt = is_q ? 2 : 0;
8699 int passes = scalar ? 1 : 2;
8702 tcg_res[1] = tcg_const_i32(0);
8705 for (pass = 0; pass < passes; pass++) {
8706 TCGv_i64 tcg_op = tcg_temp_new_i64();
8707 NeonGenNarrowFn *genfn = NULL;
8708 NeonGenNarrowEnvFn *genenvfn = NULL;
8711 read_vec_element(s, tcg_op, rn, pass, size + 1);
8713 read_vec_element(s, tcg_op, rn, pass, MO_64);
8715 tcg_res[pass] = tcg_temp_new_i32();
8718 case 0x12: /* XTN, SQXTUN */
8720 static NeonGenNarrowFn * const xtnfns[3] = {
8721 gen_helper_neon_narrow_u8,
8722 gen_helper_neon_narrow_u16,
8723 tcg_gen_extrl_i64_i32,
8725 static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
8726 gen_helper_neon_unarrow_sat8,
8727 gen_helper_neon_unarrow_sat16,
8728 gen_helper_neon_unarrow_sat32,
8731 genenvfn = sqxtunfns[size];
8733 genfn = xtnfns[size];
8737 case 0x14: /* SQXTN, UQXTN */
8739 static NeonGenNarrowEnvFn * const fns[3][2] = {
8740 { gen_helper_neon_narrow_sat_s8,
8741 gen_helper_neon_narrow_sat_u8 },
8742 { gen_helper_neon_narrow_sat_s16,
8743 gen_helper_neon_narrow_sat_u16 },
8744 { gen_helper_neon_narrow_sat_s32,
8745 gen_helper_neon_narrow_sat_u32 },
8747 genenvfn = fns[size][u];
8750 case 0x16: /* FCVTN, FCVTN2 */
8751 /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
8753 gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
8755 TCGv_i32 tcg_lo = tcg_temp_new_i32();
8756 TCGv_i32 tcg_hi = tcg_temp_new_i32();
8757 tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
8758 gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
8759 gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
8760 tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
8761 tcg_temp_free_i32(tcg_lo);
8762 tcg_temp_free_i32(tcg_hi);
8765 case 0x56: /* FCVTXN, FCVTXN2 */
8766 /* 64 bit to 32 bit float conversion
8767 * with von Neumann rounding (round to odd)
8770 gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
8773 g_assert_not_reached();
8777 genfn(tcg_res[pass], tcg_op);
8778 } else if (genenvfn) {
8779 genenvfn(tcg_res[pass], cpu_env, tcg_op);
8782 tcg_temp_free_i64(tcg_op);
8785 for (pass = 0; pass < 2; pass++) {
8786 write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
8787 tcg_temp_free_i32(tcg_res[pass]);
8789 clear_vec_high(s, is_q, rd);
8792 /* Remaining saturating accumulating ops */
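/* SUQADD accumulates an unsigned addend into a signed destination with
 * signed saturation; USQADD is the converse (signed addend, unsigned
 * destination, unsigned saturation).
 */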
8793 static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
8794 bool is_q, int size, int rn, int rd)
8796 bool is_double = (size == 3);
8799 TCGv_i64 tcg_rn = tcg_temp_new_i64();
8800 TCGv_i64 tcg_rd = tcg_temp_new_i64();
8803 for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
8804 read_vec_element(s, tcg_rn, rn, pass, MO_64);
8805 read_vec_element(s, tcg_rd, rd, pass, MO_64);
8807 if (is_u) { /* USQADD */
8808 gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8809 } else { /* SUQADD */
8810 gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8812 write_vec_element(s, tcg_rd, rd, pass, MO_64);
8814 tcg_temp_free_i64(tcg_rd);
8815 tcg_temp_free_i64(tcg_rn);
8816 clear_vec_high(s, !is_scalar, rd);
8818 TCGv_i32 tcg_rn = tcg_temp_new_i32();
8819 TCGv_i32 tcg_rd = tcg_temp_new_i32();
8820 int pass, maxpasses;
8825 maxpasses = is_q ? 4 : 2;
8828 for (pass = 0; pass < maxpasses; pass++) {
8830 read_vec_element_i32(s, tcg_rn, rn, pass, size);
8831 read_vec_element_i32(s, tcg_rd, rd, pass, size);
8833 read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
8834 read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8837 if (is_u) { /* USQADD */
8840 gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8843 gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8846 gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8849 g_assert_not_reached();
8851 } else { /* SUQADD */
8854 gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8857 gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8860 gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
8863 g_assert_not_reached();
8868 TCGv_i64 tcg_zero = tcg_const_i64(0);
8869 write_vec_element(s, tcg_zero, rd, 0, MO_64);
8870 tcg_temp_free_i64(tcg_zero);
8872 write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
8874 tcg_temp_free_i32(tcg_rd);
8875 tcg_temp_free_i32(tcg_rn);
8876 clear_vec_high(s, is_q, rd);
8880 /* AdvSIMD scalar two reg misc
8881 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
8882 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8883 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
8884 * +-----+---+-----------+------+-----------+--------+-----+------+------+
8886 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
8888 int rd = extract32(insn, 0, 5);
8889 int rn = extract32(insn, 5, 5);
8890 int opcode = extract32(insn, 12, 5);
8891 int size = extract32(insn, 22, 2);
8892 bool u = extract32(insn, 29, 1);
8893 bool is_fcvt = false;
8896 TCGv_ptr tcg_fpstatus;
8899 case 0x3: /* USQADD / SUQADD*/
8900 if (!fp_access_check(s)) {
8903 handle_2misc_satacc(s, true, u, false, size, rn, rd);
8905 case 0x7: /* SQABS / SQNEG */
8907 case 0xa: /* CMLT */
8909 unallocated_encoding(s);
8913 case 0x8: /* CMGT, CMGE */
8914 case 0x9: /* CMEQ, CMLE */
8915 case 0xb: /* ABS, NEG */
8917 unallocated_encoding(s);
8921 case 0x12: /* SQXTUN */
8923 unallocated_encoding(s);
8927 case 0x14: /* SQXTN, UQXTN */
8929 unallocated_encoding(s);
8932 if (!fp_access_check(s)) {
8935 handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
8940 /* Floating point: U, size[1] and opcode indicate operation;
8941 * size[0] indicates single or double precision.
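    /* e.g. FCVTZS: the 5-bit opcode field is 0x1b and size<1> is 1, so
     * the extended opcode below is 0x3b; size<0> then selects double
     * (size 3) versus single (size 2) precision.
     */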
8943 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
8944 size = extract32(size, 0, 1) ? 3 : 2;
8946 case 0x2c: /* FCMGT (zero) */
8947 case 0x2d: /* FCMEQ (zero) */
8948 case 0x2e: /* FCMLT (zero) */
8949 case 0x6c: /* FCMGE (zero) */
8950 case 0x6d: /* FCMLE (zero) */
8951 handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
8953 case 0x1d: /* SCVTF */
8954 case 0x5d: /* UCVTF */
8956 bool is_signed = (opcode == 0x1d);
8957 if (!fp_access_check(s)) {
8960 handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
8963 case 0x3d: /* FRECPE */
8964 case 0x3f: /* FRECPX */
8965 case 0x7d: /* FRSQRTE */
8966 if (!fp_access_check(s)) {
8969 handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
8971 case 0x1a: /* FCVTNS */
8972 case 0x1b: /* FCVTMS */
8973 case 0x3a: /* FCVTPS */
8974 case 0x3b: /* FCVTZS */
8975 case 0x5a: /* FCVTNU */
8976 case 0x5b: /* FCVTMU */
8977 case 0x7a: /* FCVTPU */
8978 case 0x7b: /* FCVTZU */
8980 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
8982 case 0x1c: /* FCVTAS */
8983 case 0x5c: /* FCVTAU */
8984 /* TIEAWAY doesn't fit in the usual rounding mode encoding */
8986 rmode = FPROUNDING_TIEAWAY;
8988 case 0x56: /* FCVTXN, FCVTXN2 */
8990 unallocated_encoding(s);
8993 if (!fp_access_check(s)) {
8996 handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
8999 unallocated_encoding(s);
9004 unallocated_encoding(s);
9008 if (!fp_access_check(s)) {
9013 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
9014 tcg_fpstatus = get_fpstatus_ptr(false);
9015 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9018 tcg_fpstatus = NULL;
9022 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
9023 TCGv_i64 tcg_rd = tcg_temp_new_i64();
9025 handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
9026 write_fp_dreg(s, rd, tcg_rd);
9027 tcg_temp_free_i64(tcg_rd);
9028 tcg_temp_free_i64(tcg_rn);
9030 TCGv_i32 tcg_rn = tcg_temp_new_i32();
9031 TCGv_i32 tcg_rd = tcg_temp_new_i32();
9033 read_vec_element_i32(s, tcg_rn, rn, 0, size);
9036 case 0x7: /* SQABS, SQNEG */
9038 NeonGenOneOpEnvFn *genfn;
9039 static NeonGenOneOpEnvFn * const fns[3][2] = {
9040 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
9041 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
9042 { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
9044 genfn = fns[size][u];
9045 genfn(tcg_rd, cpu_env, tcg_rn);
9048 case 0x1a: /* FCVTNS */
9049 case 0x1b: /* FCVTMS */
9050 case 0x1c: /* FCVTAS */
9051 case 0x3a: /* FCVTPS */
9052 case 0x3b: /* FCVTZS */
9054 TCGv_i32 tcg_shift = tcg_const_i32(0);
9055 gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9056 tcg_temp_free_i32(tcg_shift);
9059 case 0x5a: /* FCVTNU */
9060 case 0x5b: /* FCVTMU */
9061 case 0x5c: /* FCVTAU */
9062 case 0x7a: /* FCVTPU */
9063 case 0x7b: /* FCVTZU */
9065 TCGv_i32 tcg_shift = tcg_const_i32(0);
9066 gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
9067 tcg_temp_free_i32(tcg_shift);
9071 g_assert_not_reached();
9074 write_fp_sreg(s, rd, tcg_rd);
9075 tcg_temp_free_i32(tcg_rd);
9076 tcg_temp_free_i32(tcg_rn);
9080 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
9081 tcg_temp_free_i32(tcg_rmode);
9082 tcg_temp_free_ptr(tcg_fpstatus);
9086 static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9088 tcg_gen_vec_sar8i_i64(a, a, shift);
9089 tcg_gen_vec_add8_i64(d, d, a);
9092 static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9094 tcg_gen_vec_sar16i_i64(a, a, shift);
9095 tcg_gen_vec_add16_i64(d, d, a);
9098 static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9100 tcg_gen_sari_i32(a, a, shift);
9101 tcg_gen_add_i32(d, d, a);
9104 static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9106 tcg_gen_sari_i64(a, a, shift);
9107 tcg_gen_add_i64(d, d, a);
9110 static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9112 tcg_gen_sari_vec(vece, a, a, sh);
9113 tcg_gen_add_vec(vece, d, d, a);
9116 static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9118 tcg_gen_vec_shr8i_i64(a, a, shift);
9119 tcg_gen_vec_add8_i64(d, d, a);
9122 static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9124 tcg_gen_vec_shr16i_i64(a, a, shift);
9125 tcg_gen_vec_add16_i64(d, d, a);
9128 static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9130 tcg_gen_shri_i32(a, a, shift);
9131 tcg_gen_add_i32(d, d, a);
9134 static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9136 tcg_gen_shri_i64(a, a, shift);
9137 tcg_gen_add_i64(d, d, a);
9140 static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9142 tcg_gen_shri_vec(vece, a, a, sh);
9143 tcg_gen_add_vec(vece, d, d, a);
9146 static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9148 uint64_t mask = dup_const(MO_8, 0xff >> shift);
9149 TCGv_i64 t = tcg_temp_new_i64();
9151 tcg_gen_shri_i64(t, a, shift);
9152 tcg_gen_andi_i64(t, t, mask);
9153 tcg_gen_andi_i64(d, d, ~mask);
9154 tcg_gen_or_i64(d, d, t);
9155 tcg_temp_free_i64(t);
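/* e.g. shift == 3: mask == dup_const(MO_8, 0x1f) == 0x1f1f1f1f1f1f1f1f,
 * so masking (a >> 3) strips the bits that crossed in from the
 * neighbouring byte and (d & ~mask) | t merges a per-byte SRI result.
 */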
9158 static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9160 uint64_t mask = dup_const(MO_16, 0xffff >> shift);
9161 TCGv_i64 t = tcg_temp_new_i64();
9163 tcg_gen_shri_i64(t, a, shift);
9164 tcg_gen_andi_i64(t, t, mask);
9165 tcg_gen_andi_i64(d, d, ~mask);
9166 tcg_gen_or_i64(d, d, t);
9167 tcg_temp_free_i64(t);
9170 static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9172 tcg_gen_shri_i32(a, a, shift);
9173 tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
9176 static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9178 tcg_gen_shri_i64(a, a, shift);
9179 tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
9182 static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9184 uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
9185 TCGv_vec t = tcg_temp_new_vec_matching(d);
9186 TCGv_vec m = tcg_temp_new_vec_matching(d);
9188 tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
9189 tcg_gen_shri_vec(vece, t, a, sh);
9190 tcg_gen_and_vec(vece, d, d, m);
9191 tcg_gen_or_vec(vece, d, d, t);
9193 tcg_temp_free_vec(t);
9194 tcg_temp_free_vec(m);
9197 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
9198 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
9199 int immh, int immb, int opcode, int rn, int rd)
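    /* Each GVecGen2i table below has one entry per element size
     * (MO_8..MO_64): .fni8/.fni4 expand one 64- or 32-bit chunk with
     * scalar TCG ops, .fniv expands with host vector ops, and .opc names
     * the vector opcode the TCG backend must support for the .fniv path.
     */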
9201 static const GVecGen2i ssra_op[4] = {
9202 { .fni8 = gen_ssra8_i64,
9203 .fniv = gen_ssra_vec,
9205 .opc = INDEX_op_sari_vec,
9207 { .fni8 = gen_ssra16_i64,
9208 .fniv = gen_ssra_vec,
9210 .opc = INDEX_op_sari_vec,
9212 { .fni4 = gen_ssra32_i32,
9213 .fniv = gen_ssra_vec,
9215 .opc = INDEX_op_sari_vec,
9217 { .fni8 = gen_ssra64_i64,
9218 .fniv = gen_ssra_vec,
9219 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9221 .opc = INDEX_op_sari_vec,
9224 static const GVecGen2i usra_op[4] = {
9225 { .fni8 = gen_usra8_i64,
9226 .fniv = gen_usra_vec,
9228 .opc = INDEX_op_shri_vec,
9230 { .fni8 = gen_usra16_i64,
9231 .fniv = gen_usra_vec,
9233 .opc = INDEX_op_shri_vec,
9235 { .fni4 = gen_usra32_i32,
9236 .fniv = gen_usra_vec,
9238 .opc = INDEX_op_shri_vec,
9240 { .fni8 = gen_usra64_i64,
9241 .fniv = gen_usra_vec,
9242 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9244 .opc = INDEX_op_shri_vec,
9247 static const GVecGen2i sri_op[4] = {
9248 { .fni8 = gen_shr8_ins_i64,
9249 .fniv = gen_shr_ins_vec,
9251 .opc = INDEX_op_shri_vec,
9253 { .fni8 = gen_shr16_ins_i64,
9254 .fniv = gen_shr_ins_vec,
9256 .opc = INDEX_op_shri_vec,
9258 { .fni4 = gen_shr32_ins_i32,
9259 .fniv = gen_shr_ins_vec,
9261 .opc = INDEX_op_shri_vec,
9263 { .fni8 = gen_shr64_ins_i64,
9264 .fniv = gen_shr_ins_vec,
9265 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9267 .opc = INDEX_op_shri_vec,
9271 int size = 32 - clz32(immh) - 1;
9272 int immhb = immh << 3 | immb;
9273 int shift = 2 * (8 << size) - immhb;
9274 bool accumulate = false;
9275 int dsize = is_q ? 128 : 64;
9276 int esize = 8 << size;
9277 int elements = dsize/esize;
9278 TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
9279 TCGv_i64 tcg_rn = new_tmp_a64(s);
9280 TCGv_i64 tcg_rd = new_tmp_a64(s);
9282 uint64_t round_const;
9285 if (extract32(immh, 3, 1) && !is_q) {
9286 unallocated_encoding(s);
9289 tcg_debug_assert(size <= 3);
9291 if (!fp_access_check(s)) {
9296 case 0x02: /* SSRA / USRA (accumulate) */
9298 /* Shift count same as element size produces zero to add. */
9299 if (shift == 8 << size) {
9302 gen_gvec_op2i(s, is_q, rd, rn, shift, &usra_op[size]);
9304             /* Shift count same as element size produces all sign bits to add. */
9305 if (shift == 8 << size) {
9308 gen_gvec_op2i(s, is_q, rd, rn, shift, &ssra_op[size]);
9311 case 0x08: /* SRI */
9312 /* Shift count same as element size is valid but does nothing. */
9313 if (shift == 8 << size) {
9316 gen_gvec_op2i(s, is_q, rd, rn, shift, &sri_op[size]);
9319 case 0x00: /* SSHR / USHR */
9321 if (shift == 8 << size) {
9322             /* Shift count equal to the element size produces zero. */
9323 tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
9324 is_q ? 16 : 8, vec_full_reg_size(s), 0);
9326 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
9329         /* Shift count equal to the element size produces all sign bits. */
9330 if (shift == 8 << size) {
9333 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_sari, size);
9337 case 0x04: /* SRSHR / URSHR (rounding) */
9339 case 0x06: /* SRSRA / URSRA (accum + rounding) */
9343 g_assert_not_reached();
9346 round_const = 1ULL << (shift - 1);
9347 tcg_round = tcg_const_i64(round_const);
9349 for (i = 0; i < elements; i++) {
9350 read_vec_element(s, tcg_rn, rn, i, memop);
9352 read_vec_element(s, tcg_rd, rd, i, memop);
9355 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
9356 accumulate, is_u, size, shift);
9358 write_vec_element(s, tcg_rd, rd, i, size);
9360 tcg_temp_free_i64(tcg_round);
9363 clear_vec_high(s, is_q, rd);
9366 static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9368 uint64_t mask = dup_const(MO_8, 0xff << shift);
9369 TCGv_i64 t = tcg_temp_new_i64();
9371 tcg_gen_shli_i64(t, a, shift);
9372 tcg_gen_andi_i64(t, t, mask);
9373 tcg_gen_andi_i64(d, d, ~mask);
9374 tcg_gen_or_i64(d, d, t);
9375 tcg_temp_free_i64(t);
9378 static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9380 uint64_t mask = dup_const(MO_16, 0xffff << shift);
9381 TCGv_i64 t = tcg_temp_new_i64();
9383 tcg_gen_shli_i64(t, a, shift);
9384 tcg_gen_andi_i64(t, t, mask);
9385 tcg_gen_andi_i64(d, d, ~mask);
9386 tcg_gen_or_i64(d, d, t);
9387 tcg_temp_free_i64(t);
9390 static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
9392 tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
9395 static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
9397 tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
9400 static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
9402 uint64_t mask = (1ull << sh) - 1;
9403 TCGv_vec t = tcg_temp_new_vec_matching(d);
9404 TCGv_vec m = tcg_temp_new_vec_matching(d);
9406 tcg_gen_dupi_vec(vece, m, mask);
9407 tcg_gen_shli_vec(vece, t, a, sh);
9408 tcg_gen_and_vec(vece, d, d, m);
9409 tcg_gen_or_vec(vece, d, d, t);
9411 tcg_temp_free_vec(t);
9412 tcg_temp_free_vec(m);
9415 /* SHL/SLI - Vector shift left */
9416 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
9417 int immh, int immb, int opcode, int rn, int rd)
9419 static const GVecGen2i shi_op[4] = {
9420 { .fni8 = gen_shl8_ins_i64,
9421 .fniv = gen_shl_ins_vec,
9422 .opc = INDEX_op_shli_vec,
9423 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9426 { .fni8 = gen_shl16_ins_i64,
9427 .fniv = gen_shl_ins_vec,
9428 .opc = INDEX_op_shli_vec,
9429 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9432 { .fni4 = gen_shl32_ins_i32,
9433 .fniv = gen_shl_ins_vec,
9434 .opc = INDEX_op_shli_vec,
9435 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9438 { .fni8 = gen_shl64_ins_i64,
9439 .fniv = gen_shl_ins_vec,
9440 .opc = INDEX_op_shli_vec,
9441 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
9445 int size = 32 - clz32(immh) - 1;
9446 int immhb = immh << 3 | immb;
9447 int shift = immhb - (8 << size);
9449 if (extract32(immh, 3, 1) && !is_q) {
9450 unallocated_encoding(s);
9454 if (size > 3 && !is_q) {
9455 unallocated_encoding(s);
9459 if (!fp_access_check(s)) {
9464 gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
9466 gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
9470 /* USHLL/SHLL - Vector shift left with widening */
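/* Illustrative decode: SSHLL with immh == 0b0001, immb == 0b011 gives
 * size == 0 and shift == 11 - 8 == 3, so eight 8-bit elements are
 * sign-extended to 16 bits and then shifted left by 3.
 */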
9471 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
9472 int immh, int immb, int opcode, int rn, int rd)
9474 int size = 32 - clz32(immh) - 1;
9475 int immhb = immh << 3 | immb;
9476 int shift = immhb - (8 << size);
9478 int esize = 8 << size;
9479 int elements = dsize/esize;
9480 TCGv_i64 tcg_rn = new_tmp_a64(s);
9481 TCGv_i64 tcg_rd = new_tmp_a64(s);
9485 unallocated_encoding(s);
9489 if (!fp_access_check(s)) {
9493 /* For the LL variants the store is larger than the load,
9494 * so if rd == rn we would overwrite parts of our input.
9495 * So load everything right now and use shifts in the main loop.
9497 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
9499 for (i = 0; i < elements; i++) {
9500 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
9501 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
9502 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
9503 write_vec_element(s, tcg_rd, rd, i, size + 1);
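/* A scalar model of one USHLL/SSHLL lane for the size == 2 (32 -> 64 bit)
 * case; hypothetical helper, written only to make the widen-then-shift
 * order used above explicit.  shift is at most 31, so nothing is lost.
 */
static inline uint64_t sshll32_sketch(uint32_t src, unsigned shift, bool is_u)
{
    uint64_t wide = is_u ? (uint64_t)src                    /* zero extend */
                         : (uint64_t)(int64_t)(int32_t)src; /* sign extend */
    return wide << shift;
}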
9507 /* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
9508 static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
9509 int immh, int immb, int opcode, int rn, int rd)
9511 int immhb = immh << 3 | immb;
9512 int size = 32 - clz32(immh) - 1;
9514 int esize = 8 << size;
9515 int elements = dsize/esize;
9516 int shift = (2 * esize) - immhb;
9517 bool round = extract32(opcode, 0, 1);
9518 TCGv_i64 tcg_rn, tcg_rd, tcg_final;
9522 if (extract32(immh, 3, 1)) {
9523 unallocated_encoding(s);
9527 if (!fp_access_check(s)) {
9531 tcg_rn = tcg_temp_new_i64();
9532 tcg_rd = tcg_temp_new_i64();
9533 tcg_final = tcg_temp_new_i64();
9534 read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
9536 if (round) {
9537 uint64_t round_const = 1ULL << (shift - 1);
9538 tcg_round = tcg_const_i64(round_const);
9539 } else {
9540 tcg_round = NULL;
9541 }
9543 for (i = 0; i < elements; i++) {
9544 read_vec_element(s, tcg_rn, rn, i, size+1);
9545 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
9546 false, true, size+1, shift);
9548 tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
9551 if (!is_q) {
9552 write_vec_element(s, tcg_final, rd, 0, MO_64);
9553 } else {
9554 write_vec_element(s, tcg_final, rd, 1, MO_64);
9555 }
9556 if (round) {
9557 tcg_temp_free_i64(tcg_round);
9558 }
9559 tcg_temp_free_i64(tcg_rn);
9560 tcg_temp_free_i64(tcg_rd);
9561 tcg_temp_free_i64(tcg_final);
9563 clear_vec_high(s, is_q, rd);
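/* Sketch of one SHRN/RSHRN lane narrowing 64 -> 32 bits (our example
 * helper; the loop above works on TCG values and also handles the 8- and
 * 16-bit destinations).  Assumes 1 <= shift <= 32.
 */
static inline uint32_t rshrn64_sketch(uint64_t src, unsigned shift, bool round)
{
    uint64_t round_const = round ? 1ULL << (shift - 1) : 0;
    return (uint32_t)((src + round_const) >> shift);  /* truncate to lane */
}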
9567 /* AdvSIMD shift by immediate
9568 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
9569 * +---+---+---+-------------+------+------+--------+---+------+------+
9570 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
9571 * +---+---+---+-------------+------+------+--------+---+------+------+
9573 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
9575 int rd = extract32(insn, 0, 5);
9576 int rn = extract32(insn, 5, 5);
9577 int opcode = extract32(insn, 11, 5);
9578 int immb = extract32(insn, 16, 3);
9579 int immh = extract32(insn, 19, 4);
9580 bool is_u = extract32(insn, 29, 1);
9581 bool is_q = extract32(insn, 30, 1);
9584 case 0x08: /* SRI */
9585 if (!is_u) {
9586 unallocated_encoding(s);
9587 return;
9588 }
9589 /* fall through */
9590 case 0x00: /* SSHR / USHR */
9591 case 0x02: /* SSRA / USRA (accumulate) */
9592 case 0x04: /* SRSHR / URSHR (rounding) */
9593 case 0x06: /* SRSRA / URSRA (accum + rounding) */
9594 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
9596 case 0x0a: /* SHL / SLI */
9597 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
9599 case 0x10: /* SHRN */
9600 case 0x11: /* RSHRN / SQRSHRUN */
9601 if (is_u) {
9602 handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
9603 opcode, rn, rd);
9604 } else {
9605 handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
9606 }
9607 break;
9608 case 0x12: /* SQSHRN / UQSHRN */
9609 case 0x13: /* SQRSHRN / UQRSHRN */
9610 handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
9611 opcode, rn, rd);
9612 break;
9613 case 0x14: /* SSHLL / USHLL */
9614 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
9616 case 0x1c: /* SCVTF / UCVTF */
9617 handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
9618 opcode, rn, rd);
9619 break;
9620 case 0xc: /* SQSHLU */
9621 if (!is_u) {
9622 unallocated_encoding(s);
9623 return;
9624 }
9625 handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
9627 case 0xe: /* SQSHL, UQSHL */
9628 handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
9630 case 0x1f: /* FCVTZS/ FCVTZU */
9631 handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
9634 unallocated_encoding(s);
9639 /* Generate code to do a "long" addition or subtraction, ie one done in
9640 * TCGv_i64 on vector lanes twice the width specified by size.
9642 static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
9643 TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
9645 static NeonGenTwo64OpFn * const fns[3][2] = {
9646 { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
9647 { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
9648 { tcg_gen_add_i64, tcg_gen_sub_i64 },
9650 NeonGenTwo64OpFn *genfn;
9653 genfn = fns[size][is_sub];
9654 genfn(tcg_res, tcg_op1, tcg_op2);
9657 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
9658 int opcode, int rd, int rn, int rm)
9660 /* 3-reg-different widening insns: 64 x 64 -> 128 */
9661 TCGv_i64 tcg_res[2];
9664 tcg_res[0] = tcg_temp_new_i64();
9665 tcg_res[1] = tcg_temp_new_i64();
9667 /* Does this op do an adding accumulate, a subtracting accumulate,
9668 * or no accumulate at all?
9686 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
9687 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
9690 /* size == 2 means two 32x32->64 operations; this is worth special
9691 * casing because we can generally handle it inline.
9694 for (pass = 0; pass < 2; pass++) {
9695 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9696 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9697 TCGv_i64 tcg_passres;
9698 TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
9700 int elt = pass + is_q * 2;
9702 read_vec_element(s, tcg_op1, rn, elt, memop);
9703 read_vec_element(s, tcg_op2, rm, elt, memop);
9706 tcg_passres = tcg_res[pass];
9708 tcg_passres = tcg_temp_new_i64();
9712 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
9713 tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
9715 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
9716 tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
9718 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
9719 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
9721 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
9722 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
9724 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
9725 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
9726 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
9727 tcg_passres,
9728 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
9729 tcg_temp_free_i64(tcg_tmp1);
9730 tcg_temp_free_i64(tcg_tmp2);
9733 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
9734 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
9735 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
9736 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
9738 case 9: /* SQDMLAL, SQDMLAL2 */
9739 case 11: /* SQDMLSL, SQDMLSL2 */
9740 case 13: /* SQDMULL, SQDMULL2 */
9741 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
9742 gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
9743 tcg_passres, tcg_passres);
9746 g_assert_not_reached();
9749 if (opcode == 9 || opcode == 11) {
9750 /* saturating accumulate ops */
9751 if (accop < 0) {
9752 tcg_gen_neg_i64(tcg_passres, tcg_passres);
9753 }
9754 gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
9755 tcg_res[pass], tcg_passres);
9756 } else if (accop > 0) {
9757 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
9758 } else if (accop < 0) {
9759 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
9763 tcg_temp_free_i64(tcg_passres);
9766 tcg_temp_free_i64(tcg_op1);
9767 tcg_temp_free_i64(tcg_op2);
9770 /* size 0 or 1, generally helper functions */
9771 for (pass = 0; pass < 2; pass++) {
9772 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
9773 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9774 TCGv_i64 tcg_passres;
9775 int elt = pass + is_q * 2;
9777 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
9778 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
9781 tcg_passres = tcg_res[pass];
9783 tcg_passres = tcg_temp_new_i64();
9787 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
9788 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
9790 TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
9791 static NeonGenWidenFn * const widenfns[2][2] = {
9792 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
9793 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
9795 NeonGenWidenFn *widenfn = widenfns[size][is_u];
9797 widenfn(tcg_op2_64, tcg_op2);
9798 widenfn(tcg_passres, tcg_op1);
9799 gen_neon_addl(size, (opcode == 2), tcg_passres,
9800 tcg_passres, tcg_op2_64);
9801 tcg_temp_free_i64(tcg_op2_64);
9804 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
9805 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
9806 if (size == 0) {
9807 if (is_u) {
9808 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
9809 } else {
9810 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
9811 }
9812 } else {
9813 if (is_u) {
9814 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
9815 } else {
9816 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
9817 }
9818 }
9819 break;
9820 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
9821 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
9822 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
9823 if (size == 0) {
9824 if (is_u) {
9825 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
9826 } else {
9827 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
9828 }
9829 } else {
9830 if (is_u) {
9831 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
9832 } else {
9833 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
9834 }
9835 }
9836 break;
9837 case 9: /* SQDMLAL, SQDMLAL2 */
9838 case 11: /* SQDMLSL, SQDMLSL2 */
9839 case 13: /* SQDMULL, SQDMULL2 */
9841 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
9842 gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
9843 tcg_passres, tcg_passres);
9845 case 14: /* PMULL */
9847 gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
9850 g_assert_not_reached();
9852 tcg_temp_free_i32(tcg_op1);
9853 tcg_temp_free_i32(tcg_op2);
9856 if (opcode == 9 || opcode == 11) {
9857 /* saturating accumulate ops */
9858 if (accop < 0) {
9859 gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
9860 }
9861 gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
9862 tcg_res[pass], tcg_passres);
9865 gen_neon_addl(size, (accop < 0), tcg_res[pass],
9866 tcg_res[pass], tcg_passres);
9868 tcg_temp_free_i64(tcg_passres);
9873 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
9874 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
9875 tcg_temp_free_i64(tcg_res[0]);
9876 tcg_temp_free_i64(tcg_res[1]);
9879 static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
9880 int opcode, int rd, int rn, int rm)
9882 TCGv_i64 tcg_res[2];
9883 int part = is_q ? 2 : 0;
9886 for (pass = 0; pass < 2; pass++) {
9887 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9888 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
9889 TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
9890 static NeonGenWidenFn * const widenfns[3][2] = {
9891 { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
9892 { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
9893 { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
9895 NeonGenWidenFn *widenfn = widenfns[size][is_u];
9897 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9898 read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
9899 widenfn(tcg_op2_wide, tcg_op2);
9900 tcg_temp_free_i32(tcg_op2);
9901 tcg_res[pass] = tcg_temp_new_i64();
9902 gen_neon_addl(size, (opcode == 3),
9903 tcg_res[pass], tcg_op1, tcg_op2_wide);
9904 tcg_temp_free_i64(tcg_op1);
9905 tcg_temp_free_i64(tcg_op2_wide);
9908 for (pass = 0; pass < 2; pass++) {
9909 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
9910 tcg_temp_free_i64(tcg_res[pass]);
9914 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
9916 tcg_gen_addi_i64(in, in, 1U << 31);
9917 tcg_gen_extrh_i64_i32(res, in);
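/* Scalar model of do_narrow_round_high_u32() above (hypothetical name):
 * adding 1 << 31 before taking the high half rounds the discarded low
 * half to nearest, exactly what RADDHN/RSUBHN need on 64 -> 32 bit lanes.
 */
static inline uint32_t narrow_round_high_u32_sketch(uint64_t in)
{
    return (uint32_t)((in + (1ULL << 31)) >> 32);
}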
9920 static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
9921 int opcode, int rd, int rn, int rm)
9923 TCGv_i32 tcg_res[2];
9924 int part = is_q ? 2 : 0;
9927 for (pass = 0; pass < 2; pass++) {
9928 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9929 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9930 TCGv_i64 tcg_wideres = tcg_temp_new_i64();
9931 static NeonGenNarrowFn * const narrowfns[3][2] = {
9932 { gen_helper_neon_narrow_high_u8,
9933 gen_helper_neon_narrow_round_high_u8 },
9934 { gen_helper_neon_narrow_high_u16,
9935 gen_helper_neon_narrow_round_high_u16 },
9936 { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
9938 NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
9940 read_vec_element(s, tcg_op1, rn, pass, MO_64);
9941 read_vec_element(s, tcg_op2, rm, pass, MO_64);
9943 gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
9945 tcg_temp_free_i64(tcg_op1);
9946 tcg_temp_free_i64(tcg_op2);
9948 tcg_res[pass] = tcg_temp_new_i32();
9949 gennarrow(tcg_res[pass], tcg_wideres);
9950 tcg_temp_free_i64(tcg_wideres);
9953 for (pass = 0; pass < 2; pass++) {
9954 write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
9955 tcg_temp_free_i32(tcg_res[pass]);
9957 clear_vec_high(s, is_q, rd);
9960 static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
9962 /* PMULL of 64 x 64 -> 128 is an odd special case because it
9963 * is the only three-reg-diff instruction which produces a
9964 * 128-bit wide result from a single operation. However, since
9965 * it's possible to calculate the two halves more or less
9966 * separately we just use two helper calls.
9968 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
9969 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
9970 TCGv_i64 tcg_res = tcg_temp_new_i64();
9972 read_vec_element(s, tcg_op1, rn, is_q, MO_64);
9973 read_vec_element(s, tcg_op2, rm, is_q, MO_64);
9974 gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
9975 write_vec_element(s, tcg_res, rd, 0, MO_64);
9976 gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
9977 write_vec_element(s, tcg_res, rd, 1, MO_64);
9979 tcg_temp_free_i64(tcg_op1);
9980 tcg_temp_free_i64(tcg_op2);
9981 tcg_temp_free_i64(tcg_res);
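/* Sketch of the polynomial multiply the two helpers above implement: a
 * 64 x 64 -> 128 bit carry-less product, shown here for the low half only
 * (the high half collects the a >> (64 - i) terms).  Example code of
 * ours, not used by the translator.
 */
static inline uint64_t pmull64_lo_sketch(uint64_t a, uint64_t b)
{
    uint64_t res = 0;
    int i;

    for (i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            res ^= a << i;   /* XOR instead of ADD: no carries in GF(2) */
        }
    }
    return res;
}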
9984 /* AdvSIMD three different
9985 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
9986 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9987 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
9988 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
9990 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
9992 /* Instructions in this group fall into three basic classes
9993 * (in each case with the operation working on each element in
9994 * the input vectors):
9995 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
9997 * (2) wide 64 x 128 -> 128
9998 * (3) narrowing 128 x 128 -> 64
9999 * Here we do initial decode, catch unallocated cases and
10000 * dispatch to separate functions for each class.
10002 int is_q = extract32(insn, 30, 1);
10003 int is_u = extract32(insn, 29, 1);
10004 int size = extract32(insn, 22, 2);
10005 int opcode = extract32(insn, 12, 4);
10006 int rm = extract32(insn, 16, 5);
10007 int rn = extract32(insn, 5, 5);
10008 int rd = extract32(insn, 0, 5);
10011 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
10012 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
10013 /* 64 x 128 -> 128 */
10014 if (size == 3) {
10015 unallocated_encoding(s);
10016 return;
10017 }
10018 if (!fp_access_check(s)) {
10021 handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
10023 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
10024 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
10025 /* 128 x 128 -> 64 */
10026 if (size == 3) {
10027 unallocated_encoding(s);
10028 return;
10029 }
10030 if (!fp_access_check(s)) {
10033 handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
10035 case 14: /* PMULL, PMULL2 */
10036 if (is_u || size == 1 || size == 2) {
10037 unallocated_encoding(s);
10041 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
10042 unallocated_encoding(s);
10045 if (!fp_access_check(s)) {
10048 handle_pmull_64(s, is_q, rd, rn, rm);
10052 case 9: /* SQDMLAL, SQDMLAL2 */
10053 case 11: /* SQDMLSL, SQDMLSL2 */
10054 case 13: /* SQDMULL, SQDMULL2 */
10055 if (is_u || size == 0) {
10056 unallocated_encoding(s);
10057 return;
10058 }
10059 /* fall through */
10060 case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
10061 case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
10062 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
10063 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
10064 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
10065 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
10066 case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
10067 /* 64 x 64 -> 128 */
10068 if (size == 3) {
10069 unallocated_encoding(s);
10070 return;
10071 }
10073 if (!fp_access_check(s)) {
10077 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
10080 /* opcode 15 not allocated */
10081 unallocated_encoding(s);
10086 static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10088 tcg_gen_xor_i64(rn, rn, rm);
10089 tcg_gen_and_i64(rn, rn, rd);
10090 tcg_gen_xor_i64(rd, rm, rn);
10093 static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10095 tcg_gen_xor_i64(rn, rn, rd);
10096 tcg_gen_and_i64(rn, rn, rm);
10097 tcg_gen_xor_i64(rd, rd, rn);
10100 static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
10102 tcg_gen_xor_i64(rn, rn, rd);
10103 tcg_gen_andc_i64(rn, rn, rm);
10104 tcg_gen_xor_i64(rd, rd, rn);
10107 static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10109 tcg_gen_xor_vec(vece, rn, rn, rm);
10110 tcg_gen_and_vec(vece, rn, rn, rd);
10111 tcg_gen_xor_vec(vece, rd, rm, rn);
10114 static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10116 tcg_gen_xor_vec(vece, rn, rn, rd);
10117 tcg_gen_and_vec(vece, rn, rn, rm);
10118 tcg_gen_xor_vec(vece, rd, rd, rn);
10121 static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
10123 tcg_gen_xor_vec(vece, rn, rn, rd);
10124 tcg_gen_andc_vec(vece, rn, rn, rm);
10125 tcg_gen_xor_vec(vece, rd, rd, rn);
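/* The xor/and/xor sequences above are the usual branch-free mux
 * identities.  A plain 64-bit model (our helper names) of what each op
 * leaves in rd:
 */
static inline uint64_t bsl_sketch(uint64_t d, uint64_t n, uint64_t m)
{
    return (n & d) | (m & ~d);   /* BSL: d selects n (1) or m (0) */
}

static inline uint64_t bit_sketch(uint64_t d, uint64_t n, uint64_t m)
{
    return (n & m) | (d & ~m);   /* BIT: insert n where m is set */
}

static inline uint64_t bif_sketch(uint64_t d, uint64_t n, uint64_t m)
{
    return (n & ~m) | (d & m);   /* BIF: insert n where m is clear */
}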
10128 /* Logic op (opcode == 3) subgroup of C3.6.16. */
10129 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
10131 static const GVecGen3 bsl_op = {
10132 .fni8 = gen_bsl_i64,
10133 .fniv = gen_bsl_vec,
10134 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10137 static const GVecGen3 bit_op = {
10138 .fni8 = gen_bit_i64,
10139 .fniv = gen_bit_vec,
10140 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10143 static const GVecGen3 bif_op = {
10144 .fni8 = gen_bif_i64,
10145 .fniv = gen_bif_vec,
10146 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10150 int rd = extract32(insn, 0, 5);
10151 int rn = extract32(insn, 5, 5);
10152 int rm = extract32(insn, 16, 5);
10153 int size = extract32(insn, 22, 2);
10154 bool is_u = extract32(insn, 29, 1);
10155 bool is_q = extract32(insn, 30, 1);
10157 if (!fp_access_check(s)) {
10161 switch (size + 4 * is_u) {
10162 case 0: /* AND */
10163 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
10164 return;
10165 case 1: /* BIC */
10166 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
10167 return;
10168 case 2: /* ORR */
10169 if (rn == rm) { /* MOV */
10170 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_mov, 0);
10171 } else {
10172 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
10173 }
10174 return;
10175 case 3: /* ORN */
10176 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
10177 return;
10178 case 4: /* EOR */
10179 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
10180 return;
10182 case 5: /* BSL bitwise select */
10183 gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
10185 case 6: /* BIT, bitwise insert if true */
10186 gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
10188 case 7: /* BIF, bitwise insert if false */
10189 gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
10193 g_assert_not_reached();
10197 /* Pairwise op subgroup of C3.6.16.
10199 * This is called directly, or via handle_3same_float() for float pairwise
10200 * operations where the opcode and size are calculated differently.
10202 static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
10203 int size, int rn, int rm, int rd)
10208 /* Floating point operations need fpst */
10209 if (opcode >= 0x58) {
10210 fpst = get_fpstatus_ptr(false);
10211 } else {
10212 fpst = NULL;
10213 }
10215 if (!fp_access_check(s)) {
10219 /* These operations work on the concatenated rm:rn, with each pair of
10220 * adjacent elements being operated on to produce an element in the result.
10223 TCGv_i64 tcg_res[2];
10225 for (pass = 0; pass < 2; pass++) {
10226 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10227 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10228 int passreg = (pass == 0) ? rn : rm;
10230 read_vec_element(s, tcg_op1, passreg, 0, MO_64);
10231 read_vec_element(s, tcg_op2, passreg, 1, MO_64);
10232 tcg_res[pass] = tcg_temp_new_i64();
10235 case 0x17: /* ADDP */
10236 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
10238 case 0x58: /* FMAXNMP */
10239 gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10241 case 0x5a: /* FADDP */
10242 gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10244 case 0x5e: /* FMAXP */
10245 gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10247 case 0x78: /* FMINNMP */
10248 gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10250 case 0x7e: /* FMINP */
10251 gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10254 g_assert_not_reached();
10257 tcg_temp_free_i64(tcg_op1);
10258 tcg_temp_free_i64(tcg_op2);
10261 for (pass = 0; pass < 2; pass++) {
10262 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
10263 tcg_temp_free_i64(tcg_res[pass]);
10266 int maxpass = is_q ? 4 : 2;
10267 TCGv_i32 tcg_res[4];
10269 for (pass = 0; pass < maxpass; pass++) {
10270 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10271 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10272 NeonGenTwoOpFn *genfn = NULL;
10273 int passreg = pass < (maxpass / 2) ? rn : rm;
10274 int passelt = (is_q && (pass & 1)) ? 2 : 0;
10276 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
10277 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
10278 tcg_res[pass] = tcg_temp_new_i32();
10281 case 0x17: /* ADDP */
10283 static NeonGenTwoOpFn * const fns[3] = {
10284 gen_helper_neon_padd_u8,
10285 gen_helper_neon_padd_u16,
10286 tcg_gen_add_i32,
10287 };
10288 genfn = fns[size];
10289 break;
10290 }
10291 case 0x14: /* SMAXP, UMAXP */
10293 static NeonGenTwoOpFn * const fns[3][2] = {
10294 { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
10295 { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
10296 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10298 genfn = fns[size][u];
10301 case 0x15: /* SMINP, UMINP */
10303 static NeonGenTwoOpFn * const fns[3][2] = {
10304 { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
10305 { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
10306 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10308 genfn = fns[size][u];
10311 /* The FP operations are all on single floats (32 bit) */
10312 case 0x58: /* FMAXNMP */
10313 gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10315 case 0x5a: /* FADDP */
10316 gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10318 case 0x5e: /* FMAXP */
10319 gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10321 case 0x78: /* FMINNMP */
10322 gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10324 case 0x7e: /* FMINP */
10325 gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10328 g_assert_not_reached();
10331 /* FP ops called directly, otherwise call now */
10332 if (genfn) {
10333 genfn(tcg_res[pass], tcg_op1, tcg_op2);
10334 }
10336 tcg_temp_free_i32(tcg_op1);
10337 tcg_temp_free_i32(tcg_op2);
10340 for (pass = 0; pass < maxpass; pass++) {
10341 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
10342 tcg_temp_free_i32(tcg_res[pass]);
10344 clear_vec_high(s, is_q, rd);
10347 if (fpst) {
10348 tcg_temp_free_ptr(fpst);
10349 }
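/* Sketch of the pairwise data flow described above, using ADDP on
 * integer lanes as the example (hypothetical helper; 'elements' is the
 * per-operand lane count, and d must not alias n or m, which is why the
 * real code collects results in temporaries before writing rd).
 */
static inline void addp_sketch(uint64_t *d, const uint64_t *n,
                               const uint64_t *m, int elements)
{
    int i;

    for (i = 0; i < elements / 2; i++) {
        d[i] = n[2 * i] + n[2 * i + 1];                 /* low half from rn */
        d[i + elements / 2] = m[2 * i] + m[2 * i + 1];  /* high half from rm */
    }
}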
10352 /* Floating point op subgroup of C3.6.16. */
10353 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
10355 /* For floating point ops, the U, size[1] and opcode bits
10356 * together indicate the operation. size[0] indicates single
10357 * or double precision.
10358 */
10359 int fpopcode = extract32(insn, 11, 5)
10360 | (extract32(insn, 23, 1) << 5)
10361 | (extract32(insn, 29, 1) << 6);
10362 int is_q = extract32(insn, 30, 1);
10363 int size = extract32(insn, 22, 1);
10364 int rm = extract32(insn, 16, 5);
10365 int rn = extract32(insn, 5, 5);
10366 int rd = extract32(insn, 0, 5);
10368 int datasize = is_q ? 128 : 64;
10369 int esize = 32 << size;
10370 int elements = datasize / esize;
10372 if (size == 1 && !is_q) {
10373 unallocated_encoding(s);
10377 switch (fpopcode) {
10378 case 0x58: /* FMAXNMP */
10379 case 0x5a: /* FADDP */
10380 case 0x5e: /* FMAXP */
10381 case 0x78: /* FMINNMP */
10382 case 0x7e: /* FMINP */
10383 if (size && !is_q) {
10384 unallocated_encoding(s);
10387 handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
10388 rn, rm, rd);
10389 return;
10390 case 0x1b: /* FMULX */
10391 case 0x1f: /* FRECPS */
10392 case 0x3f: /* FRSQRTS */
10393 case 0x5d: /* FACGE */
10394 case 0x7d: /* FACGT */
10395 case 0x19: /* FMLA */
10396 case 0x39: /* FMLS */
10397 case 0x18: /* FMAXNM */
10398 case 0x1a: /* FADD */
10399 case 0x1c: /* FCMEQ */
10400 case 0x1e: /* FMAX */
10401 case 0x38: /* FMINNM */
10402 case 0x3a: /* FSUB */
10403 case 0x3e: /* FMIN */
10404 case 0x5b: /* FMUL */
10405 case 0x5c: /* FCMGE */
10406 case 0x5f: /* FDIV */
10407 case 0x7a: /* FABD */
10408 case 0x7c: /* FCMGT */
10409 if (!fp_access_check(s)) {
10413 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
10416 unallocated_encoding(s);
10421 static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10423 gen_helper_neon_mul_u8(a, a, b);
10424 gen_helper_neon_add_u8(d, d, a);
10427 static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10429 gen_helper_neon_mul_u16(a, a, b);
10430 gen_helper_neon_add_u16(d, d, a);
10433 static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10435 tcg_gen_mul_i32(a, a, b);
10436 tcg_gen_add_i32(d, d, a);
10439 static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10441 tcg_gen_mul_i64(a, a, b);
10442 tcg_gen_add_i64(d, d, a);
10445 static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10447 tcg_gen_mul_vec(vece, a, a, b);
10448 tcg_gen_add_vec(vece, d, d, a);
10451 static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10453 gen_helper_neon_mul_u8(a, a, b);
10454 gen_helper_neon_sub_u8(d, d, a);
10457 static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10459 gen_helper_neon_mul_u16(a, a, b);
10460 gen_helper_neon_sub_u16(d, d, a);
10463 static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
10465 tcg_gen_mul_i32(a, a, b);
10466 tcg_gen_sub_i32(d, d, a);
10469 static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
10471 tcg_gen_mul_i64(a, a, b);
10472 tcg_gen_sub_i64(d, d, a);
10475 static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
10477 tcg_gen_mul_vec(vece, a, a, b);
10478 tcg_gen_sub_vec(vece, d, d, a);
10481 /* Integer op subgroup of C3.6.16. */
10482 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
10484 static const GVecGen3 cmtst_op[4] = {
10485 { .fni4 = gen_helper_neon_tst_u8,
10486 .fniv = gen_cmtst_vec,
10488 { .fni4 = gen_helper_neon_tst_u16,
10489 .fniv = gen_cmtst_vec,
10491 { .fni4 = gen_cmtst_i32,
10492 .fniv = gen_cmtst_vec,
10494 { .fni8 = gen_cmtst_i64,
10495 .fniv = gen_cmtst_vec,
10496 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10499 static const GVecGen3 mla_op[4] = {
10500 { .fni4 = gen_mla8_i32,
10501 .fniv = gen_mla_vec,
10502 .opc = INDEX_op_mul_vec,
10505 { .fni4 = gen_mla16_i32,
10506 .fniv = gen_mla_vec,
10507 .opc = INDEX_op_mul_vec,
10510 { .fni4 = gen_mla32_i32,
10511 .fniv = gen_mla_vec,
10512 .opc = INDEX_op_mul_vec,
10515 { .fni8 = gen_mla64_i64,
10516 .fniv = gen_mla_vec,
10517 .opc = INDEX_op_mul_vec,
10518 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10522 static const GVecGen3 mls_op[4] = {
10523 { .fni4 = gen_mls8_i32,
10524 .fniv = gen_mls_vec,
10525 .opc = INDEX_op_mul_vec,
10528 { .fni4 = gen_mls16_i32,
10529 .fniv = gen_mls_vec,
10530 .opc = INDEX_op_mul_vec,
10533 { .fni4 = gen_mls32_i32,
10534 .fniv = gen_mls_vec,
10535 .opc = INDEX_op_mul_vec,
10538 { .fni8 = gen_mls64_i64,
10539 .fniv = gen_mls_vec,
10540 .opc = INDEX_op_mul_vec,
10541 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
10546 int is_q = extract32(insn, 30, 1);
10547 int u = extract32(insn, 29, 1);
10548 int size = extract32(insn, 22, 2);
10549 int opcode = extract32(insn, 11, 5);
10550 int rm = extract32(insn, 16, 5);
10551 int rn = extract32(insn, 5, 5);
10552 int rd = extract32(insn, 0, 5);
10557 case 0x13: /* MUL, PMUL */
10558 if (u && size != 0) {
10559 unallocated_encoding(s);
10563 case 0x0: /* SHADD, UHADD */
10564 case 0x2: /* SRHADD, URHADD */
10565 case 0x4: /* SHSUB, UHSUB */
10566 case 0xc: /* SMAX, UMAX */
10567 case 0xd: /* SMIN, UMIN */
10568 case 0xe: /* SABD, UABD */
10569 case 0xf: /* SABA, UABA */
10570 case 0x12: /* MLA, MLS */
10572 unallocated_encoding(s);
10576 case 0x16: /* SQDMULH, SQRDMULH */
10577 if (size == 0 || size == 3) {
10578 unallocated_encoding(s);
10583 if (size == 3 && !is_q) {
10584 unallocated_encoding(s);
10590 if (!fp_access_check(s)) {
10595 case 0x10: /* ADD, SUB */
10596 if (u) {
10597 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
10598 } else {
10599 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
10600 }
10601 return;
10602 case 0x13: /* MUL, PMUL */
10603 if (!u) { /* MUL */
10604 gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size);
10605 return;
10606 }
10607 break;
10608 case 0x12: /* MLA, MLS */
10609 if (u) {
10610 gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]);
10611 } else {
10612 gen_gvec_op3(s, is_q, rd, rn, rm, &mla_op[size]);
10613 }
10614 return;
10615 case 0x11:
10616 if (!u) { /* CMTST */
10617 gen_gvec_op3(s, is_q, rd, rn, rm, &cmtst_op[size]);
10618 return;
10619 }
10620 /* else CMEQ */
10621 cond = TCG_COND_EQ;
10622 goto do_gvec_cmp;
10623 case 0x06: /* CMGT, CMHI */
10624 cond = u ? TCG_COND_GTU : TCG_COND_GT;
10625 goto do_gvec_cmp;
10626 case 0x07: /* CMGE, CMHS */
10627 cond = u ? TCG_COND_GEU : TCG_COND_GE;
10628 do_gvec_cmp:
10629 tcg_gen_gvec_cmp(cond, size, vec_full_reg_offset(s, rd),
10630 vec_full_reg_offset(s, rn),
10631 vec_full_reg_offset(s, rm),
10632 is_q ? 16 : 8, vec_full_reg_size(s));
10633 return;
10634 }
10636 if (size == 3) {
10638 for (pass = 0; pass < 2; pass++) {
10639 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
10640 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
10641 TCGv_i64 tcg_res = tcg_temp_new_i64();
10643 read_vec_element(s, tcg_op1, rn, pass, MO_64);
10644 read_vec_element(s, tcg_op2, rm, pass, MO_64);
10646 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
10648 write_vec_element(s, tcg_res, rd, pass, MO_64);
10650 tcg_temp_free_i64(tcg_res);
10651 tcg_temp_free_i64(tcg_op1);
10652 tcg_temp_free_i64(tcg_op2);
10655 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
10656 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10657 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10658 TCGv_i32 tcg_res = tcg_temp_new_i32();
10659 NeonGenTwoOpFn *genfn = NULL;
10660 NeonGenTwoOpEnvFn *genenvfn = NULL;
10662 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
10663 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
10666 case 0x0: /* SHADD, UHADD */
10668 static NeonGenTwoOpFn * const fns[3][2] = {
10669 { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
10670 { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
10671 { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
10673 genfn = fns[size][u];
10676 case 0x1: /* SQADD, UQADD */
10678 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10679 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
10680 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
10681 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
10683 genenvfn = fns[size][u];
10686 case 0x2: /* SRHADD, URHADD */
10688 static NeonGenTwoOpFn * const fns[3][2] = {
10689 { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
10690 { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
10691 { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
10693 genfn = fns[size][u];
10696 case 0x4: /* SHSUB, UHSUB */
10698 static NeonGenTwoOpFn * const fns[3][2] = {
10699 { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
10700 { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
10701 { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
10703 genfn = fns[size][u];
10706 case 0x5: /* SQSUB, UQSUB */
10708 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10709 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
10710 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
10711 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
10713 genenvfn = fns[size][u];
10716 case 0x8: /* SSHL, USHL */
10718 static NeonGenTwoOpFn * const fns[3][2] = {
10719 { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
10720 { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
10721 { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
10723 genfn = fns[size][u];
10726 case 0x9: /* SQSHL, UQSHL */
10728 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10729 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
10730 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
10731 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
10733 genenvfn = fns[size][u];
10736 case 0xa: /* SRSHL, URSHL */
10738 static NeonGenTwoOpFn * const fns[3][2] = {
10739 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
10740 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
10741 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
10743 genfn = fns[size][u];
10746 case 0xb: /* SQRSHL, UQRSHL */
10748 static NeonGenTwoOpEnvFn * const fns[3][2] = {
10749 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
10750 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
10751 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
10753 genenvfn = fns[size][u];
10756 case 0xc: /* SMAX, UMAX */
10758 static NeonGenTwoOpFn * const fns[3][2] = {
10759 { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
10760 { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
10761 { tcg_gen_smax_i32, tcg_gen_umax_i32 },
10763 genfn = fns[size][u];
10767 case 0xd: /* SMIN, UMIN */
10769 static NeonGenTwoOpFn * const fns[3][2] = {
10770 { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
10771 { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
10772 { tcg_gen_smin_i32, tcg_gen_umin_i32 },
10774 genfn = fns[size][u];
10777 case 0xe: /* SABD, UABD */
10778 case 0xf: /* SABA, UABA */
10780 static NeonGenTwoOpFn * const fns[3][2] = {
10781 { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
10782 { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
10783 { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
10785 genfn = fns[size][u];
10788 case 0x13: /* MUL, PMUL */
10789 assert(u); /* PMUL */
10791 genfn = gen_helper_neon_mul_p8;
10793 case 0x16: /* SQDMULH, SQRDMULH */
10795 static NeonGenTwoOpEnvFn * const fns[2][2] = {
10796 { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
10797 { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
10799 assert(size == 1 || size == 2);
10800 genenvfn = fns[size - 1][u];
10804 g_assert_not_reached();
10808 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
10810 genfn(tcg_res, tcg_op1, tcg_op2);
10813 if (opcode == 0xf) {
10814 /* SABA, UABA: accumulating ops */
10815 static NeonGenTwoOpFn * const fns[3] = {
10816 gen_helper_neon_add_u8,
10817 gen_helper_neon_add_u16,
10818 tcg_gen_add_i32,
10819 };
10821 read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
10822 fns[size](tcg_res, tcg_op1, tcg_res);
10825 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
10827 tcg_temp_free_i32(tcg_res);
10828 tcg_temp_free_i32(tcg_op1);
10829 tcg_temp_free_i32(tcg_op2);
10832 clear_vec_high(s, is_q, rd);
10835 /* AdvSIMD three same
10836 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
10837 * +---+---+---+-----------+------+---+------+--------+---+------+------+
10838 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
10839 * +---+---+---+-----------+------+---+------+--------+---+------+------+
10841 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
10843 int opcode = extract32(insn, 11, 5);
10846 case 0x3: /* logic ops */
10847 disas_simd_3same_logic(s, insn);
10849 case 0x17: /* ADDP */
10850 case 0x14: /* SMAXP, UMAXP */
10851 case 0x15: /* SMINP, UMINP */
10853 /* Pairwise operations */
10854 int is_q = extract32(insn, 30, 1);
10855 int u = extract32(insn, 29, 1);
10856 int size = extract32(insn, 22, 2);
10857 int rm = extract32(insn, 16, 5);
10858 int rn = extract32(insn, 5, 5);
10859 int rd = extract32(insn, 0, 5);
10860 if (opcode == 0x17) {
10861 if (u || (size == 3 && !is_q)) {
10862 unallocated_encoding(s);
10867 unallocated_encoding(s);
10871 handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
10874 case 0x18 ... 0x31:
10875 /* floating point ops, sz[1] and U are part of opcode */
10876 disas_simd_3same_float(s, insn);
10879 disas_simd_3same_int(s, insn);
10885 * Advanced SIMD three same (ARMv8.2 FP16 variants)
10887 * 31 30 29 28 24 23 22 21 20 16 15 14 13 11 10 9 5 4 0
10888 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
10889 * | 0 | Q | U | 0 1 1 1 0 | a | 1 0 | Rm | 0 0 | opcode | 1 | Rn | Rd |
10890 * +---+---+---+-----------+---------+------+-----+--------+---+------+------+
10892 * This includes FMULX, FCMEQ (register), FRECPS, FRSQRTS, FCMGE
10893 * (register), FACGE, FABD, FCMGT (register) and FACGT.
10896 static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
10898 int opcode, fpopcode;
10899 int is_q, u, a, rm, rn, rd;
10900 int datasize, elements;
10903 bool pairwise = false;
10905 if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
10906 unallocated_encoding(s);
10910 if (!fp_access_check(s)) {
10914 /* For these floating point ops, the U, a and opcode bits
10915 * together indicate the operation.
10917 opcode = extract32(insn, 11, 3);
10918 u = extract32(insn, 29, 1);
10919 a = extract32(insn, 23, 1);
10920 is_q = extract32(insn, 30, 1);
10921 rm = extract32(insn, 16, 5);
10922 rn = extract32(insn, 5, 5);
10923 rd = extract32(insn, 0, 5);
10925 fpopcode = opcode | (a << 3) | (u << 4);
10926 datasize = is_q ? 128 : 64;
10927 elements = datasize / 16;
10929 switch (fpopcode) {
10930 case 0x10: /* FMAXNMP */
10931 case 0x12: /* FADDP */
10932 case 0x16: /* FMAXP */
10933 case 0x18: /* FMINNMP */
10934 case 0x1e: /* FMINP */
10935 pairwise = true;
10936 break;
10937 }
10939 fpst = get_fpstatus_ptr(true);
10941 if (pairwise) {
10942 int maxpass = is_q ? 8 : 4;
10943 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10944 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10945 TCGv_i32 tcg_res[8];
10947 for (pass = 0; pass < maxpass; pass++) {
10948 int passreg = pass < (maxpass / 2) ? rn : rm;
10949 int passelt = (pass << 1) & (maxpass - 1);
10951 read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_16);
10952 read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_16);
10953 tcg_res[pass] = tcg_temp_new_i32();
10955 switch (fpopcode) {
10956 case 0x10: /* FMAXNMP */
10957 gen_helper_advsimd_maxnumh(tcg_res[pass], tcg_op1, tcg_op2,
10958 fpst);
10959 break;
10960 case 0x12: /* FADDP */
10961 gen_helper_advsimd_addh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10963 case 0x16: /* FMAXP */
10964 gen_helper_advsimd_maxh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10966 case 0x18: /* FMINNMP */
10967 gen_helper_advsimd_minnumh(tcg_res[pass], tcg_op1, tcg_op2,
10968 fpst);
10969 break;
10970 case 0x1e: /* FMINP */
10971 gen_helper_advsimd_minh(tcg_res[pass], tcg_op1, tcg_op2, fpst);
10974 g_assert_not_reached();
10978 for (pass = 0; pass < maxpass; pass++) {
10979 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_16);
10980 tcg_temp_free_i32(tcg_res[pass]);
10983 tcg_temp_free_i32(tcg_op1);
10984 tcg_temp_free_i32(tcg_op2);
10986 } else {
10987 for (pass = 0; pass < elements; pass++) {
10988 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
10989 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
10990 TCGv_i32 tcg_res = tcg_temp_new_i32();
10992 read_vec_element_i32(s, tcg_op1, rn, pass, MO_16);
10993 read_vec_element_i32(s, tcg_op2, rm, pass, MO_16);
10995 switch (fpopcode) {
10996 case 0x0: /* FMAXNM */
10997 gen_helper_advsimd_maxnumh(tcg_res, tcg_op1, tcg_op2, fpst);
10999 case 0x1: /* FMLA */
11000 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11001 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11002 fpst);
11003 break;
11004 case 0x2: /* FADD */
11005 gen_helper_advsimd_addh(tcg_res, tcg_op1, tcg_op2, fpst);
11007 case 0x3: /* FMULX */
11008 gen_helper_advsimd_mulxh(tcg_res, tcg_op1, tcg_op2, fpst);
11010 case 0x4: /* FCMEQ */
11011 gen_helper_advsimd_ceq_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11013 case 0x6: /* FMAX */
11014 gen_helper_advsimd_maxh(tcg_res, tcg_op1, tcg_op2, fpst);
11016 case 0x7: /* FRECPS */
11017 gen_helper_recpsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11019 case 0x8: /* FMINNM */
11020 gen_helper_advsimd_minnumh(tcg_res, tcg_op1, tcg_op2, fpst);
11022 case 0x9: /* FMLS */
11023 /* As usual for ARM, separate negation for fused multiply-add */
11024 tcg_gen_xori_i32(tcg_op1, tcg_op1, 0x8000);
11025 read_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11026 gen_helper_advsimd_muladdh(tcg_res, tcg_op1, tcg_op2, tcg_res,
11027 fpst);
11028 break;
11029 case 0xa: /* FSUB */
11030 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11032 case 0xe: /* FMIN */
11033 gen_helper_advsimd_minh(tcg_res, tcg_op1, tcg_op2, fpst);
11035 case 0xf: /* FRSQRTS */
11036 gen_helper_rsqrtsf_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11038 case 0x13: /* FMUL */
11039 gen_helper_advsimd_mulh(tcg_res, tcg_op1, tcg_op2, fpst);
11041 case 0x14: /* FCMGE */
11042 gen_helper_advsimd_cge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11044 case 0x15: /* FACGE */
11045 gen_helper_advsimd_acge_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11047 case 0x17: /* FDIV */
11048 gen_helper_advsimd_divh(tcg_res, tcg_op1, tcg_op2, fpst);
11050 case 0x1a: /* FABD */
11051 gen_helper_advsimd_subh(tcg_res, tcg_op1, tcg_op2, fpst);
11052 tcg_gen_andi_i32(tcg_res, tcg_res, 0x7fff);
11054 case 0x1c: /* FCMGT */
11055 gen_helper_advsimd_cgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11057 case 0x1d: /* FACGT */
11058 gen_helper_advsimd_acgt_f16(tcg_res, tcg_op1, tcg_op2, fpst);
11061 fprintf(stderr, "%s: insn %#04x, fpop %#2x @ %#" PRIx64 "\n",
11062 __func__, insn, fpopcode, s->pc);
11063 g_assert_not_reached();
11066 write_vec_element_i32(s, tcg_res, rd, pass, MO_16);
11067 tcg_temp_free_i32(tcg_res);
11068 tcg_temp_free_i32(tcg_op1);
11069 tcg_temp_free_i32(tcg_op2);
11072 if (fpst) {
11073 tcg_temp_free_ptr(fpst);
11074 }
11075 clear_vec_high(s, is_q, rd);
11078 /* AdvSIMD three same extra
11079 * 31 30 29 28 24 23 22 21 20 16 15 14 11 10 9 5 4 0
11080 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11081 * | 0 | Q | U | 0 1 1 1 0 | size | 0 | Rm | 1 | opcode | 1 | Rn | Rd |
11082 * +---+---+---+-----------+------+---+------+---+--------+---+----+----+
11084 static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
11086 int rd = extract32(insn, 0, 5);
11087 int rn = extract32(insn, 5, 5);
11088 int opcode = extract32(insn, 11, 4);
11089 int rm = extract32(insn, 16, 5);
11090 int size = extract32(insn, 22, 2);
11091 bool u = extract32(insn, 29, 1);
11092 bool is_q = extract32(insn, 30, 1);
11095 switch (u * 16 + opcode) {
11096 case 0x10: /* SQRDMLAH (vector) */
11097 case 0x11: /* SQRDMLSH (vector) */
11098 if (size != 1 && size != 2) {
11099 unallocated_encoding(s);
11102 feature = ARM_FEATURE_V8_RDM;
11104 case 0x8: /* FCMLA, #0 */
11105 case 0x9: /* FCMLA, #90 */
11106 case 0xa: /* FCMLA, #180 */
11107 case 0xb: /* FCMLA, #270 */
11108 case 0xc: /* FCADD, #90 */
11109 case 0xe: /* FCADD, #270 */
11110 if (size == 0
11111 || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
11112 || (size == 3 && !is_q)) {
11113 unallocated_encoding(s);
11116 feature = ARM_FEATURE_V8_FCMA;
11119 unallocated_encoding(s);
11122 if (!arm_dc_feature(s, feature)) {
11123 unallocated_encoding(s);
11126 if (!fp_access_check(s)) {
11131 case 0x0: /* SQRDMLAH (vector) */
11134 gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s16);
11137 gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlah_s32);
11140 g_assert_not_reached();
11144 case 0x1: /* SQRDMLSH (vector) */
11147 gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s16);
11150 gen_gvec_op3_env(s, is_q, rd, rn, rm, gen_helper_gvec_qrdmlsh_s32);
11153 g_assert_not_reached();
11157 case 0x8: /* FCMLA, #0 */
11158 case 0x9: /* FCMLA, #90 */
11159 case 0xa: /* FCMLA, #180 */
11160 case 0xb: /* FCMLA, #270 */
11161 rot = extract32(opcode, 0, 2);
11164 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, true, rot,
11165 gen_helper_gvec_fcmlah);
11168 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11169 gen_helper_gvec_fcmlas);
11172 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, false, rot,
11173 gen_helper_gvec_fcmlad);
11176 g_assert_not_reached();
11180 case 0xc: /* FCADD, #90 */
11181 case 0xe: /* FCADD, #270 */
11182 rot = extract32(opcode, 1, 1);
11185 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11186 gen_helper_gvec_fcaddh);
11189 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11190 gen_helper_gvec_fcadds);
11193 gen_gvec_op3_fpst(s, is_q, rd, rn, rm, size == 1, rot,
11194 gen_helper_gvec_fcaddd);
11197 g_assert_not_reached();
11202 g_assert_not_reached();
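/* Sketch of FCADD on one complex element pair (our model, using host
 * floats and ignoring the FP16/flags/rounding details the real helpers
 * handle): the second operand is rotated by multiplying it by +/-i
 * before the add.
 */
static inline void fcadd_sketch(float res[2], const float a[2],
                                const float b[2], int rot)
{
    if (rot == 90) {             /* b * i == (-b_im, b_re) */
        res[0] = a[0] - b[1];
        res[1] = a[1] + b[0];
    } else {                     /* rot == 270: b * -i == (b_im, -b_re) */
        res[0] = a[0] + b[1];
        res[1] = a[1] - b[0];
    }
}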
11206 static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
11207 int size, int rn, int rd)
11209 /* Handle 2-reg-misc ops which are widening (so each size element
11210 * in the source becomes a 2*size element in the destination).
11211 * The only instruction like this is FCVTL.
11212 */
11216 /* 32 -> 64 bit fp conversion */
11217 TCGv_i64 tcg_res[2];
11218 int srcelt = is_q ? 2 : 0;
11220 for (pass = 0; pass < 2; pass++) {
11221 TCGv_i32 tcg_op = tcg_temp_new_i32();
11222 tcg_res[pass] = tcg_temp_new_i64();
11224 read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
11225 gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
11226 tcg_temp_free_i32(tcg_op);
11228 for (pass = 0; pass < 2; pass++) {
11229 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11230 tcg_temp_free_i64(tcg_res[pass]);
11233 /* 16 -> 32 bit fp conversion */
11234 int srcelt = is_q ? 4 : 0;
11235 TCGv_i32 tcg_res[4];
11237 for (pass = 0; pass < 4; pass++) {
11238 tcg_res[pass] = tcg_temp_new_i32();
11240 read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
11241 gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
11244 for (pass = 0; pass < 4; pass++) {
11245 write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
11246 tcg_temp_free_i32(tcg_res[pass]);
11251 static void handle_rev(DisasContext *s, int opcode, bool u,
11252 bool is_q, int size, int rn, int rd)
11254 int op = (opcode << 1) | u;
11255 int opsz = op + size;
11256 int grp_size = 3 - opsz;
11257 int dsize = is_q ? 128 : 64;
11261 unallocated_encoding(s);
11265 if (!fp_access_check(s)) {
11270 /* Special case bytes, use bswap op on each group of elements */
11271 int groups = dsize / (8 << grp_size);
11273 for (i = 0; i < groups; i++) {
11274 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
11276 read_vec_element(s, tcg_tmp, rn, i, grp_size);
11277 switch (grp_size) {
11279 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
11282 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
11285 tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
11288 g_assert_not_reached();
11290 write_vec_element(s, tcg_tmp, rd, i, grp_size);
11291 tcg_temp_free_i64(tcg_tmp);
11293 clear_vec_high(s, is_q, rd);
11295 int revmask = (1 << grp_size) - 1;
11296 int esize = 8 << size;
11297 int elements = dsize / esize;
11298 TCGv_i64 tcg_rn = tcg_temp_new_i64();
11299 TCGv_i64 tcg_rd = tcg_const_i64(0);
11300 TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
11302 for (i = 0; i < elements; i++) {
11303 int e_rev = (i & 0xf) ^ revmask;
11304 int off = e_rev * esize;
11305 read_vec_element(s, tcg_rn, rn, i, size);
11307 tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
11308 tcg_rn, off - 64, esize);
11310 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
11313 write_vec_element(s, tcg_rd, rd, 0, MO_64);
11314 write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
11316 tcg_temp_free_i64(tcg_rd_hi);
11317 tcg_temp_free_i64(tcg_rd);
11318 tcg_temp_free_i64(tcg_rn);
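/* The reversal above is pure index arithmetic: XOR-ing the element index
 * with revmask mirrors it within its group.  A sketch with our helper
 * name: for REV64 on 16-bit lanes, grp_size == 2, so revmask == 3 and
 * lanes 0,1,2,3 of each 64-bit group map to 3,2,1,0.
 */
static inline int rev_elt_index_sketch(int i, int revmask)
{
    return i ^ revmask;
}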
11322 static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
11323 bool is_q, int size, int rn, int rd)
11325 /* Implement the pairwise operations from 2-misc:
11326 * SADDLP, UADDLP, SADALP, UADALP.
11327 * These all add pairs of elements in the input to produce a
11328 * double-width result element in the output (possibly accumulating).
11330 bool accum = (opcode == 0x6);
11331 int maxpass = is_q ? 2 : 1;
11333 TCGv_i64 tcg_res[2];
11336 /* 32 + 32 -> 64 op */
11337 TCGMemOp memop = size + (u ? 0 : MO_SIGN);
11339 for (pass = 0; pass < maxpass; pass++) {
11340 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
11341 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
11343 tcg_res[pass] = tcg_temp_new_i64();
11345 read_vec_element(s, tcg_op1, rn, pass * 2, memop);
11346 read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
11347 tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
11349 read_vec_element(s, tcg_op1, rd, pass, MO_64);
11350 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
11353 tcg_temp_free_i64(tcg_op1);
11354 tcg_temp_free_i64(tcg_op2);
11357 for (pass = 0; pass < maxpass; pass++) {
11358 TCGv_i64 tcg_op = tcg_temp_new_i64();
11359 NeonGenOneOpFn *genfn;
11360 static NeonGenOneOpFn * const fns[2][2] = {
11361 { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
11362 { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
11365 genfn = fns[size][u];
11367 tcg_res[pass] = tcg_temp_new_i64();
11369 read_vec_element(s, tcg_op, rn, pass, MO_64);
11370 genfn(tcg_res[pass], tcg_op);
11373 read_vec_element(s, tcg_op, rd, pass, MO_64);
11374 if (size == 0) {
11375 gen_helper_neon_addl_u16(tcg_res[pass],
11376 tcg_res[pass], tcg_op);
11377 } else {
11378 gen_helper_neon_addl_u32(tcg_res[pass],
11379 tcg_res[pass], tcg_op);
11380 }
11382 tcg_temp_free_i64(tcg_op);
11385 if (!is_q) {
11386 tcg_res[1] = tcg_const_i64(0);
11387 }
11388 for (pass = 0; pass < 2; pass++) {
11389 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11390 tcg_temp_free_i64(tcg_res[pass]);
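/* Scalar model of the pairwise-long ops above, shown for SADDLP on
 * 16 -> 32 bit lanes (hypothetical helper).  Each sum fits in 17 bits,
 * so the widened element cannot overflow; SADALP would additionally add
 * the old d[i].
 */
static inline void saddlp_sketch(int32_t *d, const int16_t *n, int pairs)
{
    int i;

    for (i = 0; i < pairs; i++) {
        d[i] = (int32_t)n[2 * i] + n[2 * i + 1];
    }
}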
11394 static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
11396 /* Implement SHLL and SHLL2 */
11398 int part = is_q ? 2 : 0;
11399 TCGv_i64 tcg_res[2];
11401 for (pass = 0; pass < 2; pass++) {
11402 static NeonGenWidenFn * const widenfns[3] = {
11403 gen_helper_neon_widen_u8,
11404 gen_helper_neon_widen_u16,
11405 tcg_gen_extu_i32_i64,
11407 NeonGenWidenFn *widenfn = widenfns[size];
11408 TCGv_i32 tcg_op = tcg_temp_new_i32();
11410 read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
11411 tcg_res[pass] = tcg_temp_new_i64();
11412 widenfn(tcg_res[pass], tcg_op);
11413 tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
11415 tcg_temp_free_i32(tcg_op);
11418 for (pass = 0; pass < 2; pass++) {
11419 write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
11420 tcg_temp_free_i64(tcg_res[pass]);
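/* Sketch of one SHLL lane (our example, with src holding a zero-extended
 * source element): the shift amount is always the source element width,
 * 8 << size, applied after widening.
 */
static inline uint64_t shll_sketch(uint32_t src, int size)
{
    return (uint64_t)src << (8 << size);
}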
11424 /* AdvSIMD two reg misc
11425 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
11426 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11427 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
11428 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
11430 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
11432 int size = extract32(insn, 22, 2);
11433 int opcode = extract32(insn, 12, 5);
11434 bool u = extract32(insn, 29, 1);
11435 bool is_q = extract32(insn, 30, 1);
11436 int rn = extract32(insn, 5, 5);
11437 int rd = extract32(insn, 0, 5);
11438 bool need_fpstatus = false;
11439 bool need_rmode = false;
11441 TCGv_i32 tcg_rmode;
11442 TCGv_ptr tcg_fpstatus;
11445 case 0x0: /* REV64, REV32 */
11446 case 0x1: /* REV16 */
11447 handle_rev(s, opcode, u, is_q, size, rn, rd);
11449 case 0x5: /* CNT, NOT, RBIT */
11450 if (u && size == 0) {
11451 /* NOT */
11452 break;
11453 } else if (u && size == 1) {
11454 /* RBIT */
11455 break;
11456 } else if (!u && size == 0) {
11457 /* CNT */
11458 break;
11459 }
11460 unallocated_encoding(s);
11461 return;
11462 case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
11463 case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
11464 if (size == 3) {
11465 unallocated_encoding(s);
11466 return;
11467 }
11468 if (!fp_access_check(s)) {
11472 handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
11474 case 0x4: /* CLS, CLZ */
11475 if (size == 3) {
11476 unallocated_encoding(s);
11477 return;
11478 }
11479 break;
11480 case 0x2: /* SADDLP, UADDLP */
11481 case 0x6: /* SADALP, UADALP */
11482 if (size == 3) {
11483 unallocated_encoding(s);
11484 return;
11485 }
11486 if (!fp_access_check(s)) {
11489 handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
11491 case 0x13: /* SHLL, SHLL2 */
11492 if (u == 0 || size == 3) {
11493 unallocated_encoding(s);
11496 if (!fp_access_check(s)) {
11499 handle_shll(s, is_q, size, rn, rd);
11501 case 0xa: /* CMLT */
11502 if (u == 1) {
11503 unallocated_encoding(s);
11504 return;
11505 }
11506 /* fall through */
11507 case 0x8: /* CMGT, CMGE */
11508 case 0x9: /* CMEQ, CMLE */
11509 case 0xb: /* ABS, NEG */
11510 if (size == 3 && !is_q) {
11511 unallocated_encoding(s);
11515 case 0x3: /* SUQADD, USQADD */
11516 if (size == 3 && !is_q) {
11517 unallocated_encoding(s);
11520 if (!fp_access_check(s)) {
11523 handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
11525 case 0x7: /* SQABS, SQNEG */
11526 if (size == 3 && !is_q) {
11527 unallocated_encoding(s);
11532 case 0x16 ... 0x1d:
11535 /* Floating point: U, size[1] and opcode indicate operation;
11536 * size[0] indicates single or double precision.
11538 int is_double = extract32(size, 0, 1);
11539 opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
11540 size = is_double ? 3 : 2;
11542 case 0x2f: /* FABS */
11543 case 0x6f: /* FNEG */
11544 if (size == 3 && !is_q) {
11545 unallocated_encoding(s);
11549 case 0x1d: /* SCVTF */
11550 case 0x5d: /* UCVTF */
11552 bool is_signed = (opcode == 0x1d);
11553 int elements = is_double ? 2 : is_q ? 4 : 2;
11554 if (is_double && !is_q) {
11555 unallocated_encoding(s);
11558 if (!fp_access_check(s)) {
11561 handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
11564 case 0x2c: /* FCMGT (zero) */
11565 case 0x2d: /* FCMEQ (zero) */
11566 case 0x2e: /* FCMLT (zero) */
11567 case 0x6c: /* FCMGE (zero) */
11568 case 0x6d: /* FCMLE (zero) */
11569 if (size == 3 && !is_q) {
11570 unallocated_encoding(s);
11573 handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
11575 case 0x7f: /* FSQRT */
11576 if (size == 3 && !is_q) {
11577 unallocated_encoding(s);
11581 case 0x1a: /* FCVTNS */
11582 case 0x1b: /* FCVTMS */
11583 case 0x3a: /* FCVTPS */
11584 case 0x3b: /* FCVTZS */
11585 case 0x5a: /* FCVTNU */
11586 case 0x5b: /* FCVTMU */
11587 case 0x7a: /* FCVTPU */
11588 case 0x7b: /* FCVTZU */
11589 need_fpstatus = true;
11590 need_rmode = true;
11591 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11592 if (size == 3 && !is_q) {
11593 unallocated_encoding(s);
11597 case 0x5c: /* FCVTAU */
11598 case 0x1c: /* FCVTAS */
11599 need_fpstatus = true;
11600 need_rmode = true;
11601 rmode = FPROUNDING_TIEAWAY;
11602 if (size == 3 && !is_q) {
11603 unallocated_encoding(s);
11607 case 0x3c: /* URECPE */
11608 if (size == 3) {
11609 unallocated_encoding(s);
11610 return;
11611 }
11612 /* fall through */
11613 case 0x3d: /* FRECPE */
11614 case 0x7d: /* FRSQRTE */
11615 if (size == 3 && !is_q) {
11616 unallocated_encoding(s);
11619 if (!fp_access_check(s)) {
11622 handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
11624 case 0x56: /* FCVTXN, FCVTXN2 */
11625 if (size == 2) {
11626 unallocated_encoding(s);
11627 return;
11628 }
11629 /* fall through */
11630 case 0x16: /* FCVTN, FCVTN2 */
11631 /* handle_2misc_narrow does a 2*size -> size operation, but these
11632 * instructions encode the source size rather than dest size.
11634 if (!fp_access_check(s)) {
11637 handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
11639 case 0x17: /* FCVTL, FCVTL2 */
11640 if (!fp_access_check(s)) {
11643 handle_2misc_widening(s, opcode, is_q, size, rn, rd);
11645 case 0x18: /* FRINTN */
11646 case 0x19: /* FRINTM */
11647 case 0x38: /* FRINTP */
11648 case 0x39: /* FRINTZ */
11649 need_rmode = true;
11650 rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
11652 case 0x59: /* FRINTX */
11653 case 0x79: /* FRINTI */
11654 need_fpstatus = true;
11655 if (size == 3 && !is_q) {
11656 unallocated_encoding(s);
11660 case 0x58: /* FRINTA */
11661 need_rmode = true;
11662 rmode = FPROUNDING_TIEAWAY;
11663 need_fpstatus = true;
11664 if (size == 3 && !is_q) {
11665 unallocated_encoding(s);
11669 case 0x7c: /* URSQRTE */
11670 if (size == 3) {
11671 unallocated_encoding(s);
11672 return;
11673 }
11674 need_fpstatus = true;
11677 unallocated_encoding(s);
11683 unallocated_encoding(s);
11687 if (!fp_access_check(s)) {
11691 if (need_fpstatus || need_rmode) {
11692 tcg_fpstatus = get_fpstatus_ptr(false);
11693 } else {
11694 tcg_fpstatus = NULL;
11695 }
11696 if (need_rmode) {
11697 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
11698 gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
11699 } else {
11700 tcg_rmode = NULL;
11701 }
11703 switch (opcode) {
11704 case 0x5:
11705 if (u && size == 0) { /* NOT */
11706 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_not, 0);
11707 return;
11708 }
11709 break;
11710 case 0xb:
11711 if (u) { /* NEG */
11712 gen_gvec_fn2(s, is_q, rd, rn, tcg_gen_gvec_neg, size);
11713 return;
11714 }
11715 break;
11719 /* All 64-bit element operations can be shared with scalar 2misc */
11722 /* Coverity claims (size == 3 && !is_q) has been eliminated
11723 * from all paths leading to here.
11725 tcg_debug_assert(is_q);
11726 for (pass = 0; pass < 2; pass++) {
11727 TCGv_i64 tcg_op = tcg_temp_new_i64();
11728 TCGv_i64 tcg_res = tcg_temp_new_i64();
11730 read_vec_element(s, tcg_op, rn, pass, MO_64);
11732 handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
11733 tcg_rmode, tcg_fpstatus);
11735 write_vec_element(s, tcg_res, rd, pass, MO_64);
11737 tcg_temp_free_i64(tcg_res);
11738 tcg_temp_free_i64(tcg_op);
11743 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
11744 TCGv_i32 tcg_op = tcg_temp_new_i32();
11745 TCGv_i32 tcg_res = tcg_temp_new_i32();
11748 read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
11751 /* Special cases for 32 bit elements */
11753 case 0xa: /* CMLT */
11754 /* 32 bit integer comparison against zero, result is
11755 * test ? (2^32 - 1) : 0. We implement via setcond(test)
11756 * and then negating.
11757 */
11758 cond = TCG_COND_LT;
11759 do_cmop:
11760 tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
11761 tcg_gen_neg_i32(tcg_res, tcg_res);
11763 case 0x8: /* CMGT, CMGE */
11764 cond = u ? TCG_COND_GE : TCG_COND_GT;
11765 goto do_cmop;
11766 case 0x9: /* CMEQ, CMLE */
11767 cond = u ? TCG_COND_LE : TCG_COND_EQ;
11768 goto do_cmop;
11769 case 0x4: /* CLS */
11770 if (u) {
11771 tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
11772 } else {
11773 tcg_gen_clrsb_i32(tcg_res, tcg_op);
11774 }
11775 break;
11776 case 0x7: /* SQABS, SQNEG */
11777 if (u) {
11778 gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
11779 } else {
11780 gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
11781 }
11782 break;
11783 case 0xb: /* ABS, NEG */
11784 if (u) {
11785 tcg_gen_neg_i32(tcg_res, tcg_op);
11786 } else {
11787 TCGv_i32 tcg_zero = tcg_const_i32(0);
11788 tcg_gen_neg_i32(tcg_res, tcg_op);
11789 tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
11790 tcg_zero, tcg_op, tcg_res);
11791 tcg_temp_free_i32(tcg_zero);
11794 case 0x2f: /* FABS */
11795 gen_helper_vfp_abss(tcg_res, tcg_op);
11797 case 0x6f: /* FNEG */
11798 gen_helper_vfp_negs(tcg_res, tcg_op);
11800 case 0x7f: /* FSQRT */
11801 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
11803 case 0x1a: /* FCVTNS */
11804 case 0x1b: /* FCVTMS */
11805 case 0x1c: /* FCVTAS */
11806 case 0x3a: /* FCVTPS */
11807 case 0x3b: /* FCVTZS */
11809 TCGv_i32 tcg_shift = tcg_const_i32(0);
11810 gen_helper_vfp_tosls(tcg_res, tcg_op,
11811 tcg_shift, tcg_fpstatus);
11812 tcg_temp_free_i32(tcg_shift);
11815 case 0x5a: /* FCVTNU */
11816 case 0x5b: /* FCVTMU */
11817 case 0x5c: /* FCVTAU */
11818 case 0x7a: /* FCVTPU */
11819 case 0x7b: /* FCVTZU */
11821 TCGv_i32 tcg_shift = tcg_const_i32(0);
11822 gen_helper_vfp_touls(tcg_res, tcg_op,
11823 tcg_shift, tcg_fpstatus);
11824 tcg_temp_free_i32(tcg_shift);
11827 case 0x18: /* FRINTN */
11828 case 0x19: /* FRINTM */
11829 case 0x38: /* FRINTP */
11830 case 0x39: /* FRINTZ */
11831 case 0x58: /* FRINTA */
11832 case 0x79: /* FRINTI */
11833 gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
11835 case 0x59: /* FRINTX */
11836 gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
11838 case 0x7c: /* URSQRTE */
11839 gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
11842 g_assert_not_reached();
11845 /* Use helpers for 8 and 16 bit elements */
11847 case 0x5: /* CNT, RBIT */
11848 /* For these two insns size is part of the opcode specifier
11849 * (handled earlier); they always operate on byte elements.
11852 gen_helper_neon_rbit_u8(tcg_res, tcg_op);
11854 gen_helper_neon_cnt_u8(tcg_res, tcg_op);
11857 case 0x7: /* SQABS, SQNEG */
11859 NeonGenOneOpEnvFn *genfn;
11860 static NeonGenOneOpEnvFn * const fns[2][2] = {
11861 { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
11862 { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
11864 genfn = fns[size][u];
11865 genfn(tcg_res, cpu_env, tcg_op);
11868 case 0x8: /* CMGT, CMGE */
11869 case 0x9: /* CMEQ, CMLE */
11870 case 0xa: /* CMLT */
11872 static NeonGenTwoOpFn * const fns[3][2] = {
11873 { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
11874 { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
11875 { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
11877 NeonGenTwoOpFn *genfn;
11880 TCGv_i32 tcg_zero = tcg_const_i32(0);
11882 /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
11883 comp = (opcode - 0x8) * 2 + u;
11884 /* ...but LE, LT are implemented as reverse GE, GT */
11885 reverse = (comp > 2);
11889 genfn = fns[comp][size];
11891 genfn(tcg_res, tcg_zero, tcg_op);
11893 genfn(tcg_res, tcg_op, tcg_zero);
11895 tcg_temp_free_i32(tcg_zero);
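                /* Worked example (illustrative, not in the original): CMLE
                 * has opcode 0x9 and u == 1, so comp = (0x9 - 0x8) * 2 + 1
                 * = 3; reverse is true and comp maps back to 1, i.e. the
                 * CMGE helper with swapped operands: (0 >= op) == (op <= 0).
                 */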
                case 0xb: /* ABS, NEG */
                    if (u) {
                        TCGv_i32 tcg_zero = tcg_const_i32(0);
                        if (size) {
                            gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
                        } else {
                            gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
                        }
                        tcg_temp_free_i32(tcg_zero);
                    } else {
                        if (size) {
                            gen_helper_neon_abs_s16(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_abs_s8(tcg_res, tcg_op);
                        }
                    }
                    break;
                case 0x4: /* CLS, CLZ */
                    if (u) {
                        if (size == 0) {
                            gen_helper_neon_clz_u8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_clz_u16(tcg_res, tcg_op);
                        }
                    } else {
                        if (size == 0) {
                            gen_helper_neon_cls_s8(tcg_res, tcg_op);
                        } else {
                            gen_helper_neon_cls_s16(tcg_res, tcg_op);
                        }
                    }
                    break;
                default:
                    g_assert_not_reached();
                }
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_32);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }
    }
    clear_vec_high(s, is_q, rd);

    if (need_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }
    if (need_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
/* AdvSIMD [scalar] two register miscellaneous (FP16)
 *
 *   31  30  29 28  27     24  23 22 21       17 16    12 11 10 9    5 4    0
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 * | 0 | Q | U | S | 1 1 1 0 | a | 1 1 1 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +---+---+---+---+---------+---+-------------+--------+-----+------+------+
 *   mask: 1000 1111 0111 1110 0000 1100 0000 0000 0x8f7e 0c00
 *   val:  0000 1110 0111 1000 0000 1000 0000 0000 0x0e78 0800
 *
 * This actually covers two groups where scalar access is governed by
 * bit 28. A bunch of the instructions (float to integral) only exist
 * in the vector form and are unallocated for the scalar decode. Also
 * in the scalar decode Q is always 1.
 */
static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
{
    int fpop, opcode, a, u;
    int rn, rd;
    bool is_q;
    bool is_scalar;
    bool only_in_vector = false;
    int pass;
    TCGv_i32 tcg_rmode = NULL;
    TCGv_ptr tcg_fpstatus = NULL;
    bool need_rmode = false;
    bool need_fpst = true;
    int rmode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    rd = extract32(insn, 0, 5);
    rn = extract32(insn, 5, 5);

    a = extract32(insn, 23, 1);
    u = extract32(insn, 29, 1);
    is_scalar = extract32(insn, 28, 1);
    is_q = extract32(insn, 30, 1);

    opcode = extract32(insn, 12, 5);
    fpop = deposit32(opcode, 5, 1, a);
    fpop = deposit32(fpop, 6, 1, u);
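    /* Illustrative example (not in the original source): for FCVTZU
     * (half-precision) the opcode field is 0x1b with a == 1 and u == 1, so
     * fpop = deposit32(deposit32(0x1b, 5, 1, 1), 6, 1, 1) = 0x7b, matching
     * the "case 0x7b" label below.
     */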
    switch (fpop) {
    case 0x1d: /* SCVTF */
    case 0x5d: /* UCVTF */
    {
        int elements;

        if (is_scalar) {
            elements = 1;
        } else {
            elements = (is_q ? 8 : 4);
        }

        if (!fp_access_check(s)) {
            return;
        }
        handle_simd_intfp_conv(s, rd, rn, elements, !u, 0, MO_16);
        return;
    }
    case 0x2c: /* FCMGT (zero) */
    case 0x2d: /* FCMEQ (zero) */
    case 0x2e: /* FCMLT (zero) */
    case 0x6c: /* FCMGE (zero) */
    case 0x6d: /* FCMLE (zero) */
        handle_2misc_fcmp_zero(s, fpop, is_scalar, 0, is_q, MO_16, rn, rd);
        return;
    case 0x3d: /* FRECPE */
    case 0x3f: /* FRECPX */
        break;
    case 0x18: /* FRINTN */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x19: /* FRINTM */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x38: /* FRINTP */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x39: /* FRINTZ */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x58: /* FRINTA */
        need_rmode = true;
        only_in_vector = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x59: /* FRINTX */
    case 0x79: /* FRINTI */
        only_in_vector = true;
        /* current rounding mode */
        break;
    case 0x1a: /* FCVTNS */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x1b: /* FCVTMS */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x1c: /* FCVTAS */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x3a: /* FCVTPS */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x3b: /* FCVTZS */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x5a: /* FCVTNU */
        need_rmode = true;
        rmode = FPROUNDING_TIEEVEN;
        break;
    case 0x5b: /* FCVTMU */
        need_rmode = true;
        rmode = FPROUNDING_NEGINF;
        break;
    case 0x5c: /* FCVTAU */
        need_rmode = true;
        rmode = FPROUNDING_TIEAWAY;
        break;
    case 0x7a: /* FCVTPU */
        need_rmode = true;
        rmode = FPROUNDING_POSINF;
        break;
    case 0x7b: /* FCVTZU */
        need_rmode = true;
        rmode = FPROUNDING_ZERO;
        break;
    case 0x2f: /* FABS */
    case 0x6f: /* FNEG */
        need_fpst = false;
        break;
    case 0x7d: /* FRSQRTE */
    case 0x7f: /* FSQRT (vector) */
        break;
    default:
        fprintf(stderr, "%s: insn %#04x fpop %#2x\n", __func__, insn, fpop);
        g_assert_not_reached();
    }

    /* Check additional constraints for the scalar encoding */
    if (is_scalar) {
        if (!is_q) {
            unallocated_encoding(s);
            return;
        }
        /* FRINTxx is only in the vector form */
        if (only_in_vector) {
            unallocated_encoding(s);
            return;
        }
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (need_rmode || need_fpst) {
        tcg_fpstatus = get_fpstatus_ptr(true);
    }

    if (need_rmode) {
        tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
    }
    if (is_scalar) {
        TCGv_i32 tcg_op = tcg_temp_new_i32();
        TCGv_i32 tcg_res = tcg_temp_new_i32();

        read_vec_element_i32(s, tcg_op, rn, 0, MO_16);

        switch (fpop) {
        case 0x1a: /* FCVTNS */
        case 0x1b: /* FCVTMS */
        case 0x1c: /* FCVTAS */
        case 0x3a: /* FCVTPS */
        case 0x3b: /* FCVTZS */
            gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3d: /* FRECPE */
            gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x3f: /* FRECPX */
            gen_helper_frecpx_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x5a: /* FCVTNU */
        case 0x5b: /* FCVTMU */
        case 0x5c: /* FCVTAU */
        case 0x7a: /* FCVTPU */
        case 0x7b: /* FCVTZU */
            gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
            break;
        case 0x6f: /* FNEG */
            tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
            break;
        case 0x7d: /* FRSQRTE */
            gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
            break;
        default:
            g_assert_not_reached();
        }

        /* limit any sign extension going on */
        tcg_gen_andi_i32(tcg_res, tcg_res, 0xffff);
        write_fp_sreg(s, rd, tcg_res);

        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_op);
    } else {
        for (pass = 0; pass < (is_q ? 8 : 4); pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, MO_16);

            switch (fpop) {
            case 0x1a: /* FCVTNS */
            case 0x1b: /* FCVTMS */
            case 0x1c: /* FCVTAS */
            case 0x3a: /* FCVTPS */
            case 0x3b: /* FCVTZS */
                gen_helper_advsimd_f16tosinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x3d: /* FRECPE */
                gen_helper_recpe_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x5a: /* FCVTNU */
            case 0x5b: /* FCVTMU */
            case 0x5c: /* FCVTAU */
            case 0x7a: /* FCVTPU */
            case 0x7b: /* FCVTZU */
                gen_helper_advsimd_f16touinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x18: /* FRINTN */
            case 0x19: /* FRINTM */
            case 0x38: /* FRINTP */
            case 0x39: /* FRINTZ */
            case 0x58: /* FRINTA */
            case 0x79: /* FRINTI */
                gen_helper_advsimd_rinth(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x59: /* FRINTX */
                gen_helper_advsimd_rinth_exact(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x2f: /* FABS */
                tcg_gen_andi_i32(tcg_res, tcg_op, 0x7fff);
                break;
            case 0x6f: /* FNEG */
                tcg_gen_xori_i32(tcg_res, tcg_op, 0x8000);
                break;
            case 0x7d: /* FRSQRTE */
                gen_helper_rsqrte_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            case 0x7f: /* FSQRT */
                gen_helper_sqrt_f16(tcg_res, tcg_op, tcg_fpstatus);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element_i32(s, tcg_res, rd, pass, MO_16);

            tcg_temp_free_i32(tcg_res);
            tcg_temp_free_i32(tcg_op);
        }

        clear_vec_high(s, is_q, rd);
    }

    if (tcg_rmode) {
        gen_helper_set_rmode(tcg_rmode, tcg_rmode, tcg_fpstatus);
        tcg_temp_free_i32(tcg_rmode);
    }

    if (tcg_fpstatus) {
        tcg_temp_free_ptr(tcg_fpstatus);
    }
}
/* AdvSIMD scalar x indexed element
 *  31 30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 1 | U | 1 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
 * AdvSIMD vector x indexed element
 *   31  30  29 28       24 23  22 21  20  19  16 15 12  11  10 9    5 4    0
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 * | 0 | Q | U | 0 1 1 1 1 | size | L | M |  Rm  | opc | H | 0 |  Rn  |  Rd  |
 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
 */
static void disas_simd_indexed(DisasContext *s, uint32_t insn)
{
    /* This encoding has two kinds of instruction:
     *  normal, where we perform elt x idxelt => elt for each
     *     element in the vector
     *  long, where we perform elt x idxelt and generate a result of
     *     double the width of the input element
     * The long ops have a 'part' specifier (i.e. they come in
     * INSN, INSN2 pairs).
     */
    bool is_scalar = extract32(insn, 28, 1);
    bool is_q = extract32(insn, 30, 1);
    bool u = extract32(insn, 29, 1);
    int size = extract32(insn, 22, 2);
    int l = extract32(insn, 21, 1);
    int m = extract32(insn, 20, 1);
    /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
    int rm = extract32(insn, 16, 4);
    int opcode = extract32(insn, 12, 4);
    int h = extract32(insn, 11, 1);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    bool is_long = false;
    int is_fp = 0;
    bool is_fp16 = false;
    int index;
    TCGv_ptr fpst;

    switch (16 * u + opcode) {
    case 0x08: /* MUL */
    case 0x10: /* MLA */
    case 0x14: /* MLS */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x02: /* SMLAL, SMLAL2 */
    case 0x12: /* UMLAL, UMLAL2 */
    case 0x06: /* SMLSL, SMLSL2 */
    case 0x16: /* UMLSL, UMLSL2 */
    case 0x0a: /* SMULL, SMULL2 */
    case 0x1a: /* UMULL, UMULL2 */
        if (is_scalar) {
            unallocated_encoding(s);
            return;
        }
        is_long = true;
        break;
    case 0x03: /* SQDMLAL, SQDMLAL2 */
    case 0x07: /* SQDMLSL, SQDMLSL2 */
    case 0x0b: /* SQDMULL, SQDMULL2 */
        is_long = true;
        break;
    case 0x0c: /* SQDMULH */
    case 0x0d: /* SQRDMULH */
        break;
    case 0x01: /* FMLA */
    case 0x05: /* FMLS */
    case 0x09: /* FMUL */
    case 0x19: /* FMULX */
        is_fp = 1;
        break;
    case 0x1d: /* SQRDMLAH */
    case 0x1f: /* SQRDMLSH */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
            unallocated_encoding(s);
            return;
        }
        break;
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
            unallocated_encoding(s);
            return;
        }
        is_fp = 2;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    switch (is_fp) {
    case 1: /* normal fp */
        /* convert insn encoded size to TCGMemOp size */
        switch (size) {
        case 0: /* half-precision */
            size = MO_16;
            is_fp16 = true;
            break;
        case MO_32: /* single precision */
        case MO_64: /* double precision */
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    case 2: /* complex fp */
        /* Each indexable element is a complex pair. */
        size += 1;
        switch (size) {
        case MO_32:
            if (h && !is_q) {
                unallocated_encoding(s);
                return;
            }
            is_fp16 = true;
            break;
        case MO_64:
            break;
        default:
            unallocated_encoding(s);
            return;
        }
        break;
    default: /* integer */
        switch (size) {
        case MO_8:
        case MO_64:
            unallocated_encoding(s);
            return;
        }
        break;
    }
    if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
        unallocated_encoding(s);
        return;
    }

    /* Given TCGMemOp size, adjust register and indexing. */
    switch (size) {
    case MO_16:
        index = h << 2 | l << 1 | m;
        break;
    case MO_32:
        index = h << 1 | l;
        rm |= m << 4;
        break;
    case MO_64:
        if (l || !is_q) {
            unallocated_encoding(s);
            return;
        }
        index = h;
        rm |= m << 4;
        break;
    default:
        g_assert_not_reached();
    }
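    /* Worked example (illustrative, not in the original): a single-precision
     * element (MO_32) uses the 2-bit index h:l, so with h == 1, l == 0 the
     * operation reads element 2 of Vm; MO_16 needs three bits (h:l:m) to
     * address 8 halfwords, while MO_64 uses h alone.
     */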
    if (!fp_access_check(s)) {
        return;
    }

    if (is_fp) {
        fpst = get_fpstatus_ptr(is_fp16);
    } else {
        fpst = NULL;
    }

    switch (16 * u + opcode) {
    case 0x11: /* FCMLA #0 */
    case 0x13: /* FCMLA #90 */
    case 0x15: /* FCMLA #180 */
    case 0x17: /* FCMLA #270 */
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_reg_offset(s, rm, index, size), fpst,
                           is_q ? 16 : 8, vec_full_reg_size(s),
                           extract32(insn, 13, 2), /* rot */
                           size == MO_64
                           ? gen_helper_gvec_fcmlas_idx
                           : gen_helper_gvec_fcmlah_idx);
        tcg_temp_free_ptr(fpst);
        return;
    }

    if (size == 3) {
        TCGv_i64 tcg_idx = tcg_temp_new_i64();
        int pass;

        assert(is_fp && is_q && !is_long);

        read_vec_element(s, tcg_idx, rm, index, MO_64);

        for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
            TCGv_i64 tcg_op = tcg_temp_new_i64();
            TCGv_i64 tcg_res = tcg_temp_new_i64();

            read_vec_element(s, tcg_op, rn, pass, MO_64);

            switch (16 * u + opcode) {
            case 0x05: /* FMLS */
                /* As usual for ARM, separate negation for fused multiply-add */
                gen_helper_vfp_negd(tcg_op, tcg_op);
                /* fall through */
            case 0x01: /* FMLA */
                read_vec_element(s, tcg_res, rd, pass, MO_64);
                gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
                break;
            case 0x09: /* FMUL */
                gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            case 0x19: /* FMULX */
                gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
                break;
            default:
                g_assert_not_reached();
            }

            write_vec_element(s, tcg_res, rd, pass, MO_64);
            tcg_temp_free_i64(tcg_op);
            tcg_temp_free_i64(tcg_res);
        }

        tcg_temp_free_i64(tcg_idx);
        clear_vec_high(s, !is_scalar, rd);
    } else if (!is_long) {
        /* 32 bit floating point, or 16 or 32 bit integer.
         * For the 16 bit scalar case we use the usual Neon helpers and
         * rely on the fact that 0 op 0 == 0 with no side effects.
         */
        TCGv_i32 tcg_idx = tcg_temp_new_i32();
        int pass, maxpasses;

        if (is_scalar) {
            maxpasses = 1;
        } else {
            maxpasses = is_q ? 4 : 2;
        }

        read_vec_element_i32(s, tcg_idx, rm, index, size);

        if (size == 1 && !is_scalar) {
            /* The simplest way to handle the 16x16 indexed ops is to duplicate
             * the index into both halves of the 32 bit tcg_idx and then use
             * the usual Neon helpers.
             */
            tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
        }
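        /* Illustrative note (not in the original): the deposit above copies
         * the low 16 bits into the high half, e.g. 0x0000abcd -> 0xabcdabcd,
         * so one 32-bit multiply helper can process both 16-bit lanes
         * against the same scalar index value.
         */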
        for (pass = 0; pass < maxpasses; pass++) {
            TCGv_i32 tcg_op = tcg_temp_new_i32();
            TCGv_i32 tcg_res = tcg_temp_new_i32();

            read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);

            switch (16 * u + opcode) {
            case 0x08: /* MUL */
            case 0x10: /* MLA */
            case 0x14: /* MLS */
            {
                static NeonGenTwoOpFn * const fns[2][2] = {
                    { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
                    { tcg_gen_add_i32, tcg_gen_sub_i32 },
                };
                NeonGenTwoOpFn *genfn;
                bool is_sub = opcode == 0x4;

                if (size == 1) {
                    gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
                } else {
                    tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
                }
                if (opcode == 0x8) {
                    break;
                }
                read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
                genfn = fns[size - 1][is_sub];
                genfn(tcg_res, tcg_op, tcg_res);
                break;
            }
            case 0x05: /* FMLS */
            case 0x01: /* FMLA */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                switch (size) {
                case 1:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for fused
                         * multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80008000);
                    }
                    if (is_scalar) {
                        gen_helper_advsimd_muladdh(tcg_res, tcg_op, tcg_idx,
                                                   tcg_res, fpst);
                    } else {
                        gen_helper_advsimd_muladd2h(tcg_res, tcg_op, tcg_idx,
                                                    tcg_res, fpst);
                    }
                    break;
                case 2:
                    if (opcode == 0x5) {
                        /* As usual for ARM, separate negation for
                         * fused multiply-add */
                        tcg_gen_xori_i32(tcg_op, tcg_op, 0x80000000);
                    }
                    gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx,
                                           tcg_res, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x09: /* FMUL */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulh(tcg_res, tcg_op,
                                                tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mul2h(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x19: /* FMULX */
                switch (size) {
                case 1:
                    if (is_scalar) {
                        gen_helper_advsimd_mulxh(tcg_res, tcg_op,
                                                 tcg_idx, fpst);
                    } else {
                        gen_helper_advsimd_mulx2h(tcg_res, tcg_op,
                                                  tcg_idx, fpst);
                    }
                    break;
                case 2:
                    gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
                    break;
                default:
                    g_assert_not_reached();
                }
                break;
            case 0x0c: /* SQDMULH */
                if (size == 1) {
                    gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
                                               tcg_op, tcg_idx);
                }
                break;
            case 0x0d: /* SQRDMULH */
                if (size == 1) {
                    gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx);
                }
                break;
            case 0x1d: /* SQRDMLAH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlah_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlah_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            case 0x1f: /* SQRDMLSH */
                read_vec_element_i32(s, tcg_res, rd, pass,
                                     is_scalar ? size : MO_32);
                if (size == 1) {
                    gen_helper_neon_qrdmlsh_s16(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                } else {
                    gen_helper_neon_qrdmlsh_s32(tcg_res, cpu_env,
                                                tcg_op, tcg_idx, tcg_res);
                }
                break;
            default:
                g_assert_not_reached();
            }

            if (is_scalar) {
                write_fp_sreg(s, rd, tcg_res);
            } else {
                write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
            }

            tcg_temp_free_i32(tcg_op);
            tcg_temp_free_i32(tcg_res);
        }

        tcg_temp_free_i32(tcg_idx);
        clear_vec_high(s, is_q, rd);
    } else {
        /* long ops: 16x16->32 or 32x32->64 */
        TCGv_i64 tcg_res[2];
        int pass;
        bool satop = extract32(opcode, 0, 1);
        TCGMemOp memop = MO_32;

        if (satop || !u) {
            memop |= MO_SIGN;
        }

        if (size == 2) {
            TCGv_i64 tcg_idx = tcg_temp_new_i64();

            read_vec_element(s, tcg_idx, rm, index, memop);

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i64 tcg_op = tcg_temp_new_i64();
                TCGv_i64 tcg_passres;
                int passelt;

                if (is_scalar) {
                    passelt = 0;
                } else {
                    passelt = pass + (is_q * 2);
                }

                read_vec_element(s, tcg_op, rn, passelt, memop);

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
                tcg_temp_free_i64(tcg_op);

                if (satop) {
                    /* saturating, doubling */
                    gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    tcg_gen_neg_i64(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i64(tcg_idx);

            clear_vec_high(s, !is_scalar, rd);
        } else {
            TCGv_i32 tcg_idx = tcg_temp_new_i32();
            int pass;

            read_vec_element_i32(s, tcg_idx, rm, index, size);

            if (!is_scalar) {
                /* The simplest way to handle the 16x16 indexed ops is to
                 * duplicate the index into both halves of the 32 bit tcg_idx
                 * and then use the usual Neon helpers.
                 */
                tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
            }

            for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
                TCGv_i32 tcg_op = tcg_temp_new_i32();
                TCGv_i64 tcg_passres;

                if (is_scalar) {
                    read_vec_element_i32(s, tcg_op, rn, pass, size);
                } else {
                    read_vec_element_i32(s, tcg_op, rn,
                                         pass + (is_q * 2), MO_32);
                }

                tcg_res[pass] = tcg_temp_new_i64();

                if (opcode == 0xa || opcode == 0xb) {
                    /* Non-accumulating ops */
                    tcg_passres = tcg_res[pass];
                } else {
                    tcg_passres = tcg_temp_new_i64();
                }

                if (memop & MO_SIGN) {
                    gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
                } else {
                    gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
                }
                if (satop) {
                    gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
                                                      tcg_passres, tcg_passres);
                }
                tcg_temp_free_i32(tcg_op);

                if (opcode == 0xa || opcode == 0xb) {
                    continue;
                }

                /* Accumulating op: handle accumulate step */
                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);

                switch (opcode) {
                case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
                    gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
                    gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
                                             tcg_passres);
                    break;
                case 0x7: /* SQDMLSL, SQDMLSL2 */
                    gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
                    /* fall through */
                case 0x3: /* SQDMLAL, SQDMLAL2 */
                    gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
                                                      tcg_res[pass],
                                                      tcg_passres);
                    break;
                default:
                    g_assert_not_reached();
                }
                tcg_temp_free_i64(tcg_passres);
            }
            tcg_temp_free_i32(tcg_idx);

            if (is_scalar) {
                tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
            }
        }

        if (is_scalar) {
            tcg_res[1] = tcg_const_i64(0);
        }

        for (pass = 0; pass < 2; pass++) {
            write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
            tcg_temp_free_i64(tcg_res[pass]);
        }
    }

    if (fpst) {
        tcg_temp_free_ptr(fpst);
    }
}
/* Crypto AES
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_aes(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int decrypt;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    TCGv_i32 tcg_decrypt;
    CryptoThreeOpIntFn *genfn;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
        || size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0x4: /* AESE */
        decrypt = 0;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x6: /* AESMC */
        decrypt = 0;
        genfn = gen_helper_crypto_aesmc;
        break;
    case 0x5: /* AESD */
        decrypt = 1;
        genfn = gen_helper_crypto_aese;
        break;
    case 0x7: /* AESIMC */
        decrypt = 1;
        genfn = gen_helper_crypto_aesmc;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_decrypt = tcg_const_i32(decrypt);

    genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_decrypt);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_i32(tcg_decrypt);
}
/* Crypto three-reg SHA
 *  31             24 23  22  21 20  16  15 14    12 11 10 9    5 4    0
 * +-----------------+------+---+------+---+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 0 |  Rm  | 0 | opcode | 0 0 |  Rn  |  Rd  |
 * +-----------------+------+---+------+---+--------+-----+------+------+
 */
static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 3);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoThreeOpFn *genfn;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    int feature = ARM_FEATURE_V8_SHA256;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1C */
    case 1: /* SHA1P */
    case 2: /* SHA1M */
    case 3: /* SHA1SU0 */
        genfn = NULL;
        feature = ARM_FEATURE_V8_SHA1;
        break;
    case 4: /* SHA256H */
        genfn = gen_helper_crypto_sha256h;
        break;
    case 5: /* SHA256H2 */
        genfn = gen_helper_crypto_sha256h2;
        break;
    case 6: /* SHA256SU1 */
        genfn = gen_helper_crypto_sha256su1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);

    if (genfn) {
        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
    } else {
        TCGv_i32 tcg_opcode = tcg_const_i32(opcode);

        gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
                                    tcg_rm_ptr, tcg_opcode);
        tcg_temp_free_i32(tcg_opcode);
    }

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
}

/* Crypto two-reg SHA
 *  31             24 23  22 21       17 16    12 11 10 9    5 4    0
 * +-----------------+------+-----------+--------+-----+------+------+
 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 |  Rn  |  Rd  |
 * +-----------------+------+-----------+--------+-----+------+------+
 */
static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
{
    int size = extract32(insn, 22, 2);
    int opcode = extract32(insn, 12, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    CryptoTwoOpFn *genfn;
    int feature;
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;

    if (size != 0) {
        unallocated_encoding(s);
        return;
    }

    switch (opcode) {
    case 0: /* SHA1H */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1h;
        break;
    case 1: /* SHA1SU1 */
        feature = ARM_FEATURE_V8_SHA1;
        genfn = gen_helper_crypto_sha1su1;
        break;
    case 2: /* SHA256SU0 */
        feature = ARM_FEATURE_V8_SHA256;
        genfn = gen_helper_crypto_sha256su0;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}
/* Crypto three-reg SHA512
 *  31                   21 20  16 15  14  13 12  11 10 9    5 4    0
 * +-----------------------+------+---+---+-----+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 1 |  Rm  | 1 | O | 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+---+---+-----+--------+------+------+
 */
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int o = extract32(insn, 14, 1);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;
    CryptoThreeOpFn *genfn;

    if (o == 0) {
        switch (opcode) {
        case 0: /* SHA512H */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h;
            break;
        case 1: /* SHA512H2 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512h2;
            break;
        case 2: /* SHA512SU1 */
            feature = ARM_FEATURE_V8_SHA512;
            genfn = gen_helper_crypto_sha512su1;
            break;
        case 3: /* RAX1 */
            feature = ARM_FEATURE_V8_SHA3;
            genfn = NULL;
            break;
        }
    } else {
        switch (opcode) {
        case 0: /* SM3PARTW1 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw1;
            break;
        case 1: /* SM3PARTW2 */
            feature = ARM_FEATURE_V8_SM3;
            genfn = gen_helper_crypto_sm3partw2;
            break;
        case 2: /* SM4EKEY */
            feature = ARM_FEATURE_V8_SM4;
            genfn = gen_helper_crypto_sm4ekey;
            break;
        default:
            unallocated_encoding(s);
            return;
        }
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (genfn) {
        TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;

        tcg_rd_ptr = vec_full_reg_ptr(s, rd);
        tcg_rn_ptr = vec_full_reg_ptr(s, rn);
        tcg_rm_ptr = vec_full_reg_ptr(s, rm);

        genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);

        tcg_temp_free_ptr(tcg_rd_ptr);
        tcg_temp_free_ptr(tcg_rn_ptr);
        tcg_temp_free_ptr(tcg_rm_ptr);
    } else {
        /* RAX1 */
        TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);

            tcg_gen_rotli_i64(tcg_res[pass], tcg_op2, 1);
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    }
}
/* Crypto two-reg SHA512
 *  31                                     12 11 10 9    5 4    0
 * +-----------------------------------------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 1 0 0 0 0 0 0 1 0 0 0 | opcode |  Rn  |  Rd  |
 * +-----------------------------------------+--------+------+------+
 */
static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
    int feature;
    CryptoTwoOpFn *genfn;

    switch (opcode) {
    case 0: /* SHA512SU0 */
        feature = ARM_FEATURE_V8_SHA512;
        genfn = gen_helper_crypto_sha512su0;
        break;
    case 1: /* SM4E */
        feature = ARM_FEATURE_V8_SM4;
        genfn = gen_helper_crypto_sm4e;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);

    genfn(tcg_rd_ptr, tcg_rn_ptr);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
}
/* Crypto four-register
 *  31               23 22 21 20  16 15  14  10 9    5 4    0
 * +-------------------+-----+------+---+------+------+------+
 * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
 * +-------------------+-----+------+---+------+------+------+
 */
static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
{
    int op0 = extract32(insn, 21, 2);
    int rm = extract32(insn, 16, 5);
    int ra = extract32(insn, 10, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    int feature;

    switch (op0) {
    case 0: /* EOR3 */
    case 1: /* BCAX */
        feature = ARM_FEATURE_V8_SHA3;
        break;
    case 2: /* SM3SS1 */
        feature = ARM_FEATURE_V8_SM3;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (!arm_dc_feature(s, feature)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    if (op0 < 2) {
        TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
        int pass;

        tcg_op1 = tcg_temp_new_i64();
        tcg_op2 = tcg_temp_new_i64();
        tcg_op3 = tcg_temp_new_i64();
        tcg_res[0] = tcg_temp_new_i64();
        tcg_res[1] = tcg_temp_new_i64();

        for (pass = 0; pass < 2; pass++) {
            read_vec_element(s, tcg_op1, rn, pass, MO_64);
            read_vec_element(s, tcg_op2, rm, pass, MO_64);
            read_vec_element(s, tcg_op3, ra, pass, MO_64);

            if (op0 == 0) {
                /* EOR3 */
                tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
            } else {
                /* BCAX */
                tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
            }
            tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
        }
        write_vec_element(s, tcg_res[0], rd, 0, MO_64);
        write_vec_element(s, tcg_res[1], rd, 1, MO_64);

        tcg_temp_free_i64(tcg_op1);
        tcg_temp_free_i64(tcg_op2);
        tcg_temp_free_i64(tcg_op3);
        tcg_temp_free_i64(tcg_res[0]);
        tcg_temp_free_i64(tcg_res[1]);
    } else {
        TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;

        tcg_op1 = tcg_temp_new_i32();
        tcg_op2 = tcg_temp_new_i32();
        tcg_op3 = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_zero = tcg_const_i32(0);

        read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
        read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
        read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);

        tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
        tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
        tcg_gen_rotri_i32(tcg_res, tcg_res, 25);

        write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
        write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
        write_vec_element_i32(s, tcg_res, rd, 3, MO_32);

        tcg_temp_free_i32(tcg_op1);
        tcg_temp_free_i32(tcg_op2);
        tcg_temp_free_i32(tcg_op3);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_zero);
    }
}
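/* Note (informational, not from the original source): in the SM3SS1 path of
 * disas_crypto_four_reg() above, rotate-right by 20 and by 25 are the SM3
 * specification's rotate-left by 12 and by 7, so the generated code computes
 * ROL32(ROL32(op1, 12) + op2 + op3, 7) into lane 3 of Vd and zeroes the
 * remaining three lanes.
 */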
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
 * +-----------------------+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 1 0 0 |  Rm  |  imm6  |  Rn  |  Rd  |
 * +-----------------------+------+--------+------+------+
 */
static void disas_crypto_xar(DisasContext *s, uint32_t insn)
{
    int rm = extract32(insn, 16, 5);
    int imm6 = extract32(insn, 10, 6);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_op1 = tcg_temp_new_i64();
    tcg_op2 = tcg_temp_new_i64();
    tcg_res[0] = tcg_temp_new_i64();
    tcg_res[1] = tcg_temp_new_i64();

    for (pass = 0; pass < 2; pass++) {
        read_vec_element(s, tcg_op1, rn, pass, MO_64);
        read_vec_element(s, tcg_op2, rm, pass, MO_64);

        tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
        tcg_gen_rotri_i64(tcg_res[pass], tcg_res[pass], imm6);
    }
    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
    write_vec_element(s, tcg_res[1], rd, 1, MO_64);

    tcg_temp_free_i64(tcg_op1);
    tcg_temp_free_i64(tcg_op2);
    tcg_temp_free_i64(tcg_res[0]);
    tcg_temp_free_i64(tcg_res[1]);
}
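/* Illustrative example (not from the original source): XAR computes
 * ROR64(Vn.d[i] ^ Vm.d[i], imm6) per 64-bit lane; with imm6 == 8, an XOR
 * result of 0x00000000000000ff rotates to 0xff00000000000000.
 */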
/* Crypto three-reg imm2
 *  31                   21 20  16 15  14 13 12  11 10 9    5 4    0
 * +-----------------------+------+-----+------+--------+------+------+
 * | 1 1 0 0 1 1 1 0 0 1 0 |  Rm  | 1 0 | imm2 | opcode |  Rn  |  Rd  |
 * +-----------------------+------+-----+------+--------+------+------+
 */
static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
{
    int opcode = extract32(insn, 10, 2);
    int imm2 = extract32(insn, 12, 2);
    int rm = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int rd = extract32(insn, 0, 5);
    TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
    TCGv_i32 tcg_imm2, tcg_opcode;

    if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
        unallocated_encoding(s);
        return;
    }

    if (!fp_access_check(s)) {
        return;
    }

    tcg_rd_ptr = vec_full_reg_ptr(s, rd);
    tcg_rn_ptr = vec_full_reg_ptr(s, rn);
    tcg_rm_ptr = vec_full_reg_ptr(s, rm);
    tcg_imm2 = tcg_const_i32(imm2);
    tcg_opcode = tcg_const_i32(opcode);

    gen_helper_crypto_sm3tt(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr, tcg_imm2,
                            tcg_opcode);

    tcg_temp_free_ptr(tcg_rd_ptr);
    tcg_temp_free_ptr(tcg_rn_ptr);
    tcg_temp_free_ptr(tcg_rm_ptr);
    tcg_temp_free_i32(tcg_imm2);
    tcg_temp_free_i32(tcg_opcode);
}
/* C3.6 Data processing - SIMD, inc Crypto
 *
 * As the decode gets a little complex we are using a table based
 * approach for this part of the decode.
 */
static const AArch64DecodeTable data_proc_simd[] = {
    /* pattern  ,  mask     ,  fn                        */
    { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
    { 0x0e008400, 0x9f208400, disas_simd_three_reg_same_extra },
    { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
    { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
    { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
    { 0x0e000400, 0x9fe08400, disas_simd_copy },
    { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
    /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
    { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
    { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
    { 0x0e000000, 0xbf208c00, disas_simd_tb },
    { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
    { 0x2e000000, 0xbf208400, disas_simd_ext },
    { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
    { 0x5e008400, 0xdf208400, disas_simd_scalar_three_reg_same_extra },
    { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
    { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
    { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
    { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
    { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
    { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
    { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
    { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
    { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
    { 0xce608000, 0xffe0b000, disas_crypto_three_reg_sha512 },
    { 0xcec08000, 0xfffff000, disas_crypto_two_reg_sha512 },
    { 0xce000000, 0xff808000, disas_crypto_four_reg },
    { 0xce800000, 0xffe00000, disas_crypto_xar },
    { 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
    { 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
    { 0x0e780800, 0x8f7e0c00, disas_simd_two_reg_misc_fp16 },
    { 0x5e400400, 0xdf60c400, disas_simd_scalar_three_reg_same_fp16 },
    { 0x00000000, 0x00000000, NULL }
};
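/* For reference, a minimal sketch of the pattern/mask walk this table
 * drives. The real lookup_disas_fn() helper is defined earlier in this
 * file; the _sketch name below is hypothetical, and the pattern/mask field
 * names are assumed from the column comment above. An insn matches an
 * entry when (insn & mask) == pattern; the loop tests mask first so the
 * all-zero terminator entry (which would otherwise match everything)
 * instead stops the walk.
 */
static inline AArch64DecodeFn *lookup_disas_fn_sketch(
    const AArch64DecodeTable *table, uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL; /* the caller treats NULL as an unallocated encoding */
}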
static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
{
    /* Note that this is called with all non-FP cases from
     * table C3-6 so it must UNDEF for entries not specifically
     * allocated to instructions in that table.
     */
    AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
    if (fn) {
        fn(s, insn);
    } else {
        unallocated_encoding(s);
    }
}

/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->sctlr_b);
    s->insn = insn;
    s->pc += 4;

    s->fp_access_checked = false;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 15 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}
static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
                                          CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    ARMCPU *arm_cpu = arm_env_get_cpu(env);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 1;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = 0;
    dc->sctlr_b = 0;
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->tbi0 = ARM_TBFLAG_TBI0(dc->base.tb->flags);
    dc->tbi1 = ARM_TBFLAG_TBI1(dc->base.tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->sve_excp_el = ARM_TBFLAG_SVEEXC_EL(dc->base.tb->flags);
    dc->sve_len = (ARM_TBFLAG_ZCR_LEN(dc->base.tb->flags) + 1) * 16;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = arm_cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);

    /* Bound the number of insns to execute to those left on the page. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
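    /* Illustrative arithmetic (not in the original): OR-ing pc_first with
     * TARGET_PAGE_MASK sets every bit above the page offset, yielding the
     * negative value -(bytes left on the page); negating and dividing by
     * the 4-byte insn size gives the insn count. E.g. with 4KiB pages and
     * a page offset of 0xff0, bound = 0x1000 - 0xff0 = 0x10 bytes = 4 insns.
     */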
    /* If architectural single step active, limit to 1. */
    if (dc->ss_active) {
        bound = 1;
    }
    dc->base.max_insns = MIN(dc->base.max_insns, bound);

    init_tmp_a64_array(dc);
}

static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
    tcg_clear_temp_count();
}

static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->pc, 0, 0);
    dc->insn_start = tcg_last_op();
}

static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                        const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_a64_set_pc_im(dc->pc);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it likely won't be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
         * included in [tb->pc, tb->pc + tb->size) in order
         * for it to be properly cleared -- thus we
         * increment the PC here so that the logic setting
         * tb->size below does the right thing.
         */
        dc->pc += 4;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}
static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                      default_exception_el(dc));
        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        disas_a64_insn(env, dc);
    }

    dc->base.pc_next = dc->pc;
    translator_loop_temp_check(&dc->base);
}
static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        switch (dc->base.is_jmp) {
        default:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
        case DISAS_JUMP:
            if (dc->base.singlestep_enabled) {
                gen_exception_internal(EXCP_DEBUG);
            } else {
                gen_step_complete_exception(dc);
            }
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_EXIT:
            tcg_gen_exit_tb(0);
            break;
        case DISAS_JUMP:
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NORETURN:
            break;
        case DISAS_WFE:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_a64_set_pc_im(dc->pc);
            gen_helper_yield(cpu_env);
            break;
        case DISAS_WFI:
        {
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            TCGv_i32 tmp = tcg_const_i32(4);

            gen_a64_set_pc_im(dc->pc);
            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        }
    }

    /* Functions above can change dc->pc, so re-align dc->base.pc_next */
    dc->base.pc_next = dc->pc;
}
static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
                                 CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

const TranslatorOps aarch64_translator_ops = {
    .init_disas_context = aarch64_tr_init_disas_context,
    .tb_start = aarch64_tr_tb_start,
    .insn_start = aarch64_tr_insn_start,
    .breakpoint_check = aarch64_tr_breakpoint_check,
    .translate_insn = aarch64_tr_translate_insn,
    .tb_stop = aarch64_tr_tb_stop,
    .disas_log = aarch64_tr_disas_log,
};
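/* Informational note (not in the original source): aarch64_translator_ops
 * is the vtable consumed by the generic translator_loop() machinery (reached
 * via the ARM gen_intermediate_code() entry point in translate.c). The loop
 * calls init_disas_context and tb_start once per translation block, then
 * insn_start and translate_insn per instruction (consulting
 * breakpoint_check when breakpoints are set), and finally tb_stop, with
 * disas_log used only when instruction logging is enabled.
 */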