/*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "translate.h"
#include "qemu/host-utils.h"

#include "exec/gen-icount.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
/* Load/store exclusive handling */
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
static TCGv_i64 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif
static const char *regnames[] = {
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
enum a64_shift_type {
    A64_SHIFT_TYPE_LSL = 0,
    A64_SHIFT_TYPE_LSR = 1,
    A64_SHIFT_TYPE_ASR = 2,
    A64_SHIFT_TYPE_ROR = 3
};
/* Table based decoder typedefs - used when the relevant bits for decode
 * are too awkwardly scattered across the instruction (eg SIMD).
 */
typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
typedef struct AArch64DecodeTable {
    uint32_t pattern;
    uint32_t mask;
    AArch64DecodeFn *disas_fn;
} AArch64DecodeTable;
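/* For example, a hypothetical decode table and its terminator might look
 * like:
 *     static const AArch64DecodeTable disas_foo[] = {
 *         { 0x0e200400, 0x9f200c00, disas_foo_insn },
 *         { 0x00000000, 0x00000000, NULL }
 *     };
 * lookup_disas_fn() below returns the first disas_fn whose
 * (insn & mask) == pattern, or NULL when it reaches the all-zeroes mask.
 */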
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
/* initialize TCG globals. */
void a64_translate_init(void)
{
    int i;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUARMState, pc),
                                    "pc");
    for (i = 0; i < 32; i++) {
        cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUARMState, xregs[i]),
                                          regnames[i]);
    }

    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
                            fprintf_function cpu_fprintf, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;

    cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
                env->pc, env->xregs[31]);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
                psr,
                psr & PSTATE_N ? 'N' : '-',
                psr & PSTATE_Z ? 'Z' : '-',
                psr & PSTATE_C ? 'C' : '-',
                psr & PSTATE_V ? 'V' : '-');
    cpu_fprintf(f, "\n");

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 32;
        for (i = 0; i < numvfpregs; i += 2) {
            uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
            uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
                        i, vhi, vlo);
            vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
            vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
            cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
                        i + 1, vhi, vlo);
        }
        cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
                    vfp_get_fpcr(env), vfp_get_fpsr(env));
    }
}
static int get_mem_index(DisasContext *s)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    return s->user;
#endif
}
void gen_a64_set_pc_im(uint64_t val)
{
    tcg_gen_movi_i64(cpu_pc, val);
}
static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_a64_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_EXC;
}
static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    /* No direct tb linking with singlestep or deterministic io */
    if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
        return false;
    }

    /* Only link tbs from inside the same guest page */
    if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
        return false;
    }

    return true;
}
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}
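/* Each TB can chain to at most two successors, so the 'n' argument selects
 * which of the two exit slots (0 or 1) this jump uses. The conditional
 * branch decoders below consistently use slot 0 for the not-taken
 * (fall through) path and slot 1 for the taken branch.
 */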
static void unallocated_encoding(DisasContext *s)
{
    gen_exception_insn(s, 4, EXCP_UDEF);
}
#define unsupported_encoding(s, insn)                                    \
    do {                                                                 \
        qemu_log_mask(LOG_UNIMP,                                         \
                      "%s:%d: unsupported instruction encoding 0x%08x "  \
                      "at pc=%016" PRIx64 "\n",                          \
                      __FILE__, __LINE__, insn, s->pc - 4);              \
        unallocated_encoding(s);                                         \
    } while (0);
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
    int i;
    for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
        TCGV_UNUSED_I64(s->tmp_a64[i]);
    }
#endif
    s->tmp_a64_count = 0;
}
static void free_tmp_a64(DisasContext *s)
{
    int i;
    for (i = 0; i < s->tmp_a64_count; i++) {
        tcg_temp_free_i64(s->tmp_a64[i]);
    }
    init_tmp_a64_array(s);
}
static TCGv_i64 new_tmp_a64(DisasContext *s)
{
    assert(s->tmp_a64_count < TMP_A64_MAX);
    return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
}
static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
{
    TCGv_i64 t = new_tmp_a64(s);
    tcg_gen_movi_i64(t, 0);
    return t;
}
/*
 * Register access functions
 *
 * These functions are used for directly accessing a register in cases
 * where changes to the final register value are likely to be made. If
 * you need to use a register for temporary calculation (e.g. index type
 * operations) use the read_* form.
 *
 * B1.2.1 Register mappings
 *
 * In instruction register encoding 31 can refer to ZR (zero register) or
 * the SP (stack pointer) depending on context. In QEMU's case we map SP
 * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
 * This is the point of the _sp forms.
 */
static TCGv_i64 cpu_reg(DisasContext *s, int reg)
{
    if (reg == 31) {
        return new_tmp_a64_zero(s);
    } else {
        return cpu_X[reg];
    }
}
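/* For example, in "ADD X0, X1, X2" register number 31 in any position
 * would mean XZR, so that decoder uses cpu_reg(); in "ADD SP, SP, #16"
 * (add/subtract immediate) register 31 means SP, so that decoder goes
 * via cpu_reg_sp() below.
 */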
/* register access for when 31 == SP */
static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
{
    return cpu_X[reg];
}
/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
 * representing the register contents. This TCGv is an auto-freed
 * temporary so it need not be explicitly freed, and may be modified.
 */
static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (reg != 31) {
        if (sf) {
            tcg_gen_mov_i64(v, cpu_X[reg]);
        } else {
            tcg_gen_ext32u_i64(v, cpu_X[reg]);
        }
    } else {
        tcg_gen_movi_i64(v, 0);
    }
    return v;
}
static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
{
    TCGv_i64 v = new_tmp_a64(s);
    if (sf) {
        tcg_gen_mov_i64(v, cpu_X[reg]);
    } else {
        tcg_gen_ext32u_i64(v, cpu_X[reg]);
    }
    return v;
}
/* Return the offset into CPUARMState of an element of specified
 * size, 'element' places in from the least significant end of
 * the FP/vector register Qn.
 */
static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    /* This is complicated slightly because vfp.regs[2n] is
     * still the low half and vfp.regs[2n+1] the high half
     * of the 128 bit vector, even on big endian systems.
     * Calculate the offset assuming a fully bigendian 128 bits,
     * then XOR to account for the order of the two 64 bit halves.
     */
    offs += (16 - ((element + 1) * (1 << size)));
    offs ^= 8;
#else
    offs += element * (1 << size);
#endif
    return offs;
}
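/* For example, on a little-endian host element 1 of size MO_32 within Qn
 * lives at byte offset 4 from vfp.regs[2 * n]; the big-endian XOR above
 * maps the same architectural element onto the correct host byte offset
 * within each 64 bit half.
 */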
/* Return the offset into CPUARMState of a slice (from
 * the least significant end) of FP register Qn (ie
 * Dn, Sn, Hn or Bn).
 * (Note that this is not the same mapping as for A32; see cpu.h)
 */
static inline int fp_reg_offset(int regno, TCGMemOp size)
{
    int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
    offs += (8 - (1 << size));
#endif
    return offs;
}
/* Offset of the high half of the 128 bit vector Qn */
static inline int fp_reg_hi_offset(int regno)
{
    return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
}
/* Convenience accessors for reading and writing single and double
 * FP registers. Writing clears the upper parts of the associated
 * 128 bit vector register, as required by the architecture.
 * Note that unlike the GP register accessors, the values returned
 * by the read functions must be manually freed.
 */
static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
    TCGv_i64 v = tcg_temp_new_i64();

    tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
    return v;
}
static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
    TCGv_i32 v = tcg_temp_new_i32();

    tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(reg, MO_32));
    return v;
}
static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    tcg_gen_st_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
    tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(reg));
    tcg_temp_free_i64(tcg_zero);
}
static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp, v);
    write_fp_dreg(s, reg, tmp);
    tcg_temp_free_i64(tmp);
}
static TCGv_ptr get_fpstatus_ptr(void)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    /* In A64 all instructions (both FP and Neon) use the FPCR;
     * there is no equivalent of the A32 Neon "standard FPSCR value"
     * and all operations use vfp.fp_status.
     */
    offset = offsetof(CPUARMState, vfp.fp_status);
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}
/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
 * than the 32 bit equivalent.
 */
static inline void gen_set_NZ64(TCGv_i64 result)
{
    TCGv_i64 flag = tcg_temp_new_i64();

    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
    tcg_gen_shri_i64(flag, result, 32);
    tcg_gen_trunc_i64_i32(cpu_NF, flag);
    tcg_temp_free_i64(flag);
}
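/* QEMU's flag storage convention: the Z flag is set iff cpu_ZF == 0, and
 * the N flag is bit 31 of cpu_NF. So for a 64 bit result we fold
 * "result != 0" into cpu_ZF, and the top 32 bits of the result (bit 63
 * ending up in bit 31) into cpu_NF.
 */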
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
static inline void gen_logic_CC(int sf, TCGv_i64 result)
{
    if (sf) {
        gen_set_NZ64(result);
    } else {
        tcg_gen_trunc_i64_i32(cpu_ZF, result);
        tcg_gen_trunc_i64_i32(cpu_NF, result);
    }
    tcg_gen_movi_i32(cpu_CF, 0);
    tcg_gen_movi_i32(cpu_VF, 0);
}
/* dest = T0 + T1; compute C, N, V and Z flags */
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;
        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();

        tcg_gen_movi_i64(tmp, 0);
        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);

        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        gen_set_NZ64(result);

        tcg_gen_xor_i64(flag, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);

        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(result);
        tcg_temp_free_i64(flag);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
    }
}
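/* The add2 trick above: adding (t0 + 0) and (t1 + 0) as a double-width
 * quantity makes the high word of the result the carry-out, which lands
 * directly in cpu_CF (32 bit case) or in 'flag' (64 bit case). Overflow
 * is then (result ^ t0) & ~(t0 ^ t1): the operands agreed in sign but the
 * result disagrees.
 */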
/* dest = T0 - T1; compute C, N, V and Z flags */
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        /* 64 bit arithmetic */
        TCGv_i64 result, flag, tmp;

        result = tcg_temp_new_i64();
        flag = tcg_temp_new_i64();
        tcg_gen_sub_i64(result, t0, t1);

        gen_set_NZ64(result);

        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
        tcg_gen_trunc_i64_i32(cpu_CF, flag);

        tcg_gen_xor_i64(flag, result, t0);
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_and_i64(flag, flag, tmp);
        tcg_temp_free_i64(tmp);
        tcg_gen_shri_i64(flag, flag, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, flag);
        tcg_gen_mov_i64(dest, result);
        tcg_temp_free_i64(flag);
        tcg_temp_free_i64(result);
    } else {
        /* 32 bit arithmetic */
        TCGv_i32 t0_32 = tcg_temp_new_i32();
        TCGv_i32 t1_32 = tcg_temp_new_i32();
        TCGv_i32 tmp;

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_temp_free_i32(t0_32);
        tcg_temp_free_i32(t1_32);
        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);
    }
}
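/* For subtraction the architectural C flag is "NOT borrow", which is
 * exactly the setcond GEU (t0 >= t1 unsigned) computed above. The
 * overflow term uses & rather than andc because subtraction can only
 * overflow when the operands have *different* signs.
 */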
/* dest = T0 + T1 + CF; do not compute flags. */
static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    TCGv_i64 flag = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(flag, cpu_CF);
    tcg_gen_add_i64(dest, t0, t1);
    tcg_gen_add_i64(dest, dest, flag);
    tcg_temp_free_i64(flag);

    if (!sf) {
        tcg_gen_ext32u_i64(dest, dest);
    }
}
/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
    if (sf) {
        TCGv_i64 result, cf_64, vf_64, tmp;
        result = tcg_temp_new_i64();
        cf_64 = tcg_temp_new_i64();
        vf_64 = tcg_temp_new_i64();
        tmp = tcg_const_i64(0);

        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
        tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
        gen_set_NZ64(result);

        tcg_gen_xor_i64(vf_64, result, t0);
        tcg_gen_xor_i64(tmp, t0, t1);
        tcg_gen_andc_i64(vf_64, vf_64, tmp);
        tcg_gen_shri_i64(vf_64, vf_64, 32);
        tcg_gen_trunc_i64_i32(cpu_VF, vf_64);

        tcg_gen_mov_i64(dest, result);

        tcg_temp_free_i64(tmp);
        tcg_temp_free_i64(vf_64);
        tcg_temp_free_i64(cf_64);
        tcg_temp_free_i64(result);
    } else {
        TCGv_i32 t0_32, t1_32, tmp;
        t0_32 = tcg_temp_new_i32();
        t1_32 = tcg_temp_new_i32();
        tmp = tcg_const_i32(0);

        tcg_gen_trunc_i64_i32(t0_32, t0);
        tcg_gen_trunc_i64_i32(t1_32, t1);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);

        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
        tcg_gen_xor_i32(tmp, t0_32, t1_32);
        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
        tcg_gen_extu_i32_i64(dest, cpu_NF);

        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(t1_32);
        tcg_temp_free_i32(t0_32);
    }
}
/*
 * Load/Store generators
 */

/*
 * Store from GPR register to memory
 */
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
                      TCGv_i64 tcg_addr, int size)
{
    g_assert(size <= 3);
    tcg_gen_qemu_st_i64(source, tcg_addr, get_mem_index(s), MO_TE + size);
}
/*
 * Load from memory to GPR register
 */
static void do_gpr_ld(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
                      int size, bool is_signed, bool extend)
{
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);

    if (is_signed) {
        memop += MO_SIGN;
    }

    tcg_gen_qemu_ld_i64(dest, tcg_addr, get_mem_index(s), memop);

    if (extend && is_signed) {
        g_assert(size < 3);
        tcg_gen_ext32u_i64(dest, dest);
    }
}
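/* The memop composition relies on size occupying the low bits of TCGMemOp:
 * e.g. size == 2 with is_signed gives MO_TESL, a target-endian
 * sign-extending 32 bit load. The extra ext32u implements the W-register
 * forms (e.g. LDRSB Wt): sign-extend within 32 bits, then zero the upper
 * half of the X register.
 */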
/*
 * Store from FP register to memory
 */
static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
    /* This writes the bottom N bits of a 128 bit wide vector to memory */
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(srcidx, MO_64));
    if (size < 4) {
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
    } else {
        TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
        tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(srcidx));
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_temp_free_i64(tmp);
}
/*
 * Load from memory to FP register
 */
static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
{
    /* This always zero-extends and writes to a full 128 bit wide vector */
    TCGv_i64 tmplo = tcg_temp_new_i64();
    TCGv_i64 tmphi;

    if (size < 4) {
        TCGMemOp memop = MO_TE + size;
        tmphi = tcg_const_i64(0);
        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
    } else {
        TCGv_i64 tcg_hiaddr;
        tmphi = tcg_temp_new_i64();
        tcg_hiaddr = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
        tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
        tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
        tcg_temp_free_i64(tcg_hiaddr);
    }

    tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(destidx, MO_64));
    tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(destidx));

    tcg_temp_free_i64(tmplo);
    tcg_temp_free_i64(tmphi);
}
/*
 * Vector load/store helpers.
 *
 * The principal difference between this and a FP load is that we don't
 * zero extend as we are filling a partial chunk of the vector register.
 * These functions don't support 128 bit loads/stores, which would be
 * normal load/store operations.
 *
 * The _i32 versions are useful when operating on 32 bit quantities
 * (eg for floating point single or using Neon helper functions).
 */
/* Get value of an element within a vector register */
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                             int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32|MO_SIGN:
        tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
        break;
    case MO_64:
    case MO_64|MO_SIGN:
        tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
                                 int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_8|MO_SIGN:
        tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_16|MO_SIGN:
        tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
        break;
    case MO_32:
    case MO_32|MO_SIGN:
        tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Set value of an element within a vector register */
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
                              int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
        break;
    case MO_64:
        tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
                                  int destidx, int element, TCGMemOp memop)
{
    int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_16:
        tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
        break;
    case MO_32:
        tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Clear the high 64 bits of a 128 bit vector (in general non-quad
 * vector ops all need to do this).
 */
static void clear_vec_high(DisasContext *s, int rd)
{
    TCGv_i64 tcg_zero = tcg_const_i64(0);

    write_vec_element(s, tcg_zero, rd, 1, MO_64);
    tcg_temp_free_i64(tcg_zero);
}
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = MO_TE + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    read_vec_element(s, tcg_tmp, srcidx, element, size);
    tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);

    tcg_temp_free_i64(tcg_tmp);
}
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
                      TCGv_i64 tcg_addr, int size)
{
    TCGMemOp memop = MO_TE + size;
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
    write_vec_element(s, tcg_tmp, destidx, element, size);

    tcg_temp_free_i64(tcg_tmp);
}
/*
 * This utility function is for doing register extension with an
 * optional shift. You will likely want to pass a temporary for the
 * destination register. See DecodeRegExtend() in the ARM ARM.
 */
static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
                              int option, unsigned int shift)
{
    int extsize = extract32(option, 0, 2);
    bool is_signed = extract32(option, 2, 1);

    if (is_signed) {
        switch (extsize) {
        case 0:
            tcg_gen_ext8s_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16s_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32s_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    } else {
        switch (extsize) {
        case 0:
            tcg_gen_ext8u_i64(tcg_out, tcg_in);
            break;
        case 1:
            tcg_gen_ext16u_i64(tcg_out, tcg_in);
            break;
        case 2:
            tcg_gen_ext32u_i64(tcg_out, tcg_in);
            break;
        case 3:
            tcg_gen_mov_i64(tcg_out, tcg_in);
            break;
        }
    }

    if (shift) {
        tcg_gen_shli_i64(tcg_out, tcg_out, shift);
    }
}
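/* For example, "LDR X0, [X1, W2, UXTW #3]" has option == 2 (UXTW) and
 * shift == 3: the W2 offset is zero-extended to 64 bits and then scaled
 * by 8 before being added to the base register.
 */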
static inline void gen_check_sp_alignment(DisasContext *s)
{
    /* The AArch64 architecture mandates that (if enabled via PSTATE
     * or SCTLR bits) there is a check that SP is 16-aligned on every
     * SP-relative load or store (with an exception generated if it is not).
     * In line with general QEMU practice regarding misaligned accesses,
     * we omit these checks for the sake of guest program performance.
     * This function is provided as a hook so we can more easily add these
     * checks in future (possibly as a "favour catching guest program bugs
     * over speed" user selectable option).
     */
}
/*
 * This provides a simple table based lookup decoder. It is
 * intended to be used when the relevant bits for decode are too
 * awkwardly placed and switch/if based logic would be confusing and
 * deeply nested. Since it's a linear search through the table, tables
 * should be kept small.
 *
 * It returns the first handler where insn & mask == pattern, or
 * NULL if there is no match.
 * The table is terminated by an empty mask (i.e. 0)
 */
static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
                                               uint32_t insn)
{
    const AArch64DecodeTable *tptr = table;

    while (tptr->mask) {
        if ((insn & tptr->mask) == tptr->pattern) {
            return tptr->disas_fn;
        }
        tptr++;
    }
    return NULL;
}
/*
 * The instruction disassembly implemented here matches
 * the instruction encoding classifications in chapter 3 (C3)
 * of the ARM Architecture Reference Manual (DDI0487A_a).
 */
/* C3.2.6 Unconditional branch (immediate)
 *   31  30       26 25                                  0
 * +----+-----------+-------------------------------------+
 * | op | 0 0 1 0 1 |                 imm26               |
 * +----+-----------+-------------------------------------+
 */
static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
{
    uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;

    if (insn & (1 << 31)) {
        /* C5.6.26 BL Branch with link */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
    }

    /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
    gen_goto_tb(s, 0, addr);
}
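/* Note that s->pc has already been advanced past this instruction, hence
 * the "- 4" when computing branch targets above: e.g. "B ." (imm26 == 0)
 * branches to itself.
 */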
/* C3.2.1 Compare & branch (immediate)
 *   31  30         25  24  23                  5 4      0
 * +----+-------------+----+---------------------+--------+
 * | sf | 0 1 1 0 1 0 | op |         imm19       |   Rt   |
 * +----+-------------+----+---------------------+--------+
 */
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.5 Test & branch (immediate)
 *   31  30         25  24  23   19 18          5 4    0
 * +----+-------------+----+-------+-------------+------+
 * | b5 | 0 1 1 0 1 1 | op |  b40  |    imm14    |  Rt  |
 * +----+-------------+----+-------+-------------+------+
 */
static void disas_test_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int bit_pos, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
    op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
    addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
    rt = extract32(insn, 0, 5);

    tcg_cmp = tcg_temp_new_i64();
    tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
    label_match = gen_new_label();
    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);
    tcg_temp_free_i64(tcg_cmp);
    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
/* C3.2.2 / C5.6.19 Conditional branch (immediate)
 *  31           25  24  23                  5   4  3    0
 * +---------------+----+---------------------+----+------+
 * | 0 1 0 1 0 1 0 | o1 |         imm19       | o0 | cond |
 * +---------------+----+---------------------+----+------+
 */
static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int cond;
    uint64_t addr;

    if ((insn & (1 << 4)) || (insn & (1 << 24))) {
        unallocated_encoding(s);
        return;
    }
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
    cond = extract32(insn, 0, 4);

    if (cond < 0x0e) {
        /* genuinely conditional branches */
        int label_match = gen_new_label();
        arm_gen_test_cc(cond, label_match);
        gen_goto_tb(s, 0, s->pc);
        gen_set_label(label_match);
        gen_goto_tb(s, 1, addr);
    } else {
        /* 0xe and 0xf are both "always" conditions */
        gen_goto_tb(s, 0, addr);
    }
}
/* C5.6.68 HINT */
static void handle_hint(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsigned int selector = crm << 3 | op2;

    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (selector) {
    case 0: /* NOP */
        return;
    case 1: /* YIELD */
    case 2: /* WFE */
    case 3: /* WFI */
    case 4: /* SEV */
    case 5: /* SEVL */
        /* we treat all as NOP at least for now */
        return;
    default:
        /* default specified as NOP equivalent */
        return;
    }
}
static void gen_clrex(DisasContext *s, uint32_t insn)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
/* CLREX, DSB, DMB, ISB */
static void handle_sync(DisasContext *s, uint32_t insn,
                        unsigned int op1, unsigned int op2, unsigned int crm)
{
    if (op1 != 3) {
        unallocated_encoding(s);
        return;
    }

    switch (op2) {
    case 2: /* CLREX */
        gen_clrex(s, insn);
        return;
    case 4: /* DSB */
    case 5: /* DMB */
    case 6: /* ISB */
        /* We don't emulate caches so barriers are no-ops */
        return;
    default:
        unallocated_encoding(s);
        return;
    }
}
/* C5.6.130 MSR (immediate) - move immediate to processor state field */
static void handle_msr_i(DisasContext *s, uint32_t insn,
                         unsigned int op1, unsigned int op2, unsigned int crm)
{
    unsupported_encoding(s, insn);
}
static void gen_get_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* build bit 31, N */
    tcg_gen_andi_i32(nzcv, cpu_NF, (1 << 31));
    /* build bit 30, Z */
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
    /* build bit 29, C */
    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
    /* build bit 28, V */
    tcg_gen_shri_i32(tmp, cpu_VF, 31);
    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
    /* generate result */
    tcg_gen_extu_i32_i64(tcg_rt, nzcv);

    tcg_temp_free_i32(nzcv);
    tcg_temp_free_i32(tmp);
}
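/* A read of NZCV thus yields e.g. 0x60000000 when Z and C are set: bit 31
 * is N (the sign of cpu_NF), bit 30 is Z (cpu_ZF == 0), bit 29 is the
 * carry held in cpu_CF and bit 28 is V (the sign of cpu_VF), matching the
 * PSTATE layout.
 */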
static void gen_set_nzcv(TCGv_i64 tcg_rt)
{
    TCGv_i32 nzcv = tcg_temp_new_i32();

    /* take NZCV from R[t] */
    tcg_gen_trunc_i64_i32(nzcv, tcg_rt);

    /* bit 31, N */
    tcg_gen_andi_i32(cpu_NF, nzcv, (1 << 31));
    /* bit 30, Z */
    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
    /* bit 29, C */
    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
    /* bit 28, V */
    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
    tcg_temp_free_i32(nzcv);
}
/* C5.6.129 MRS - move from system register
 * C5.6.131 MSR (register) - move to system register
 * C5.6.204 SYS
 * C5.6.59 SYSL
 * These are all essentially the same insn in 'read' and 'write'
 * versions, with varying op0 fields.
 */
static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
                       unsigned int op0, unsigned int op1, unsigned int op2,
                       unsigned int crn, unsigned int crm, unsigned int rt)
{
    const ARMCPRegInfo *ri;
    TCGv_i64 tcg_rt;

    ri = get_arm_cp_reginfo(s->cp_regs,
                            ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
                                               crn, crm, op0, op1, op2));

    if (!ri) {
        /* Unknown register */
        unallocated_encoding(s);
        return;
    }

    /* Check access permissions */
    if (!cp_access_ok(s->current_pl, ri, isread)) {
        unallocated_encoding(s);
        return;
    }

    /* Handle special cases first */
    switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
    case ARM_CP_NOP:
        return;
    case ARM_CP_NZCV:
        tcg_rt = cpu_reg(s, rt);
        if (isread) {
            gen_get_nzcv(tcg_rt);
        } else {
            gen_set_nzcv(tcg_rt);
        }
        return;
    default:
        break;
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        gen_io_start();
    }

    tcg_rt = cpu_reg(s, rt);

    if (isread) {
        if (ri->type & ARM_CP_CONST) {
            tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
        } else if (ri->readfn) {
            TCGv_ptr tmpptr;
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    } else {
        if (ri->type & ARM_CP_CONST) {
            /* If not forbidden by access permissions, treat as WI */
            return;
        } else if (ri->writefn) {
            TCGv_ptr tmpptr;
            gen_a64_set_pc_im(s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
            tcg_temp_free_ptr(tmpptr);
        } else {
            tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
        }
    }

    if (use_icount && (ri->type & ARM_CP_IO)) {
        /* I/O operations must end the TB here (whether read or write) */
        gen_io_end();
        s->is_jmp = DISAS_UPDATE;
    } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
        /* We default to ending the TB on a coprocessor register write,
         * but allow this to be suppressed by the register definition
         * (usually only necessary to work around guest bugs).
         */
        s->is_jmp = DISAS_UPDATE;
    }
}
/* C3.2.4 System
 *  31                 22 21  20 19 18 16 15   12 11    8 7   5 4    0
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 |  CRn  |  CRm  | op2 |  Rt  |
 * +---------------------+---+-----+-----+-------+-------+-----+------+
 */
static void disas_system(DisasContext *s, uint32_t insn)
{
    unsigned int l, op0, op1, crn, crm, op2, rt;
    l = extract32(insn, 21, 1);
    op0 = extract32(insn, 19, 2);
    op1 = extract32(insn, 16, 3);
    crn = extract32(insn, 12, 4);
    crm = extract32(insn, 8, 4);
    op2 = extract32(insn, 5, 3);
    rt = extract32(insn, 0, 5);

    if (op0 == 0) {
        if (l || rt != 31) {
            unallocated_encoding(s);
            return;
        }
        switch (crn) {
        case 2: /* C5.6.68 HINT */
            handle_hint(s, insn, op1, op2, crm);
            break;
        case 3: /* CLREX, DSB, DMB, ISB */
            handle_sync(s, insn, op1, op2, crm);
            break;
        case 4: /* C5.6.130 MSR (immediate) */
            handle_msr_i(s, insn, op1, op2, crm);
            break;
        default:
            unallocated_encoding(s);
            break;
        }
        return;
    }
    handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
}
/* C3.2.3 Exception generation
 *
 *  31             24 23 21 20                     5 4   2 1  0
 * +-----------------+-----+------------------------+-----+----+
 * | 1 1 0 1 0 1 0 0 | opc |          imm16         | op2 | LL |
 * +-----------------+-----+------------------------+-----+----+
 */
static void disas_exc(DisasContext *s, uint32_t insn)
{
    int opc = extract32(insn, 21, 3);
    int op2_ll = extract32(insn, 0, 5);

    switch (opc) {
    case 0:
        /* SVC, HVC, SMC; since we don't support the Virtualization
         * or TrustZone extensions these all UNDEF except SVC.
         */
        if (op2_ll != 1) {
            unallocated_encoding(s);
            break;
        }
        gen_exception_insn(s, 0, EXCP_SWI);
        break;
    case 1:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* BRK */
        gen_exception_insn(s, 0, EXCP_BKPT);
        break;
    case 2:
        if (op2_ll != 0) {
            unallocated_encoding(s);
            break;
        }
        /* HLT */
        unsupported_encoding(s, insn);
        break;
    case 5:
        if (op2_ll < 1 || op2_ll > 3) {
            unallocated_encoding(s);
            break;
        }
        /* DCPS1, DCPS2, DCPS3 */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.2.7 Unconditional branch (register)
 *  31           25 24   21 20   16 15   10 9    5 4     0
 * +---------------+-------+-------+-------+------+-------+
 * | 1 1 0 1 0 1 1 |  opc  |  op2  |  op3  |  Rn  |  op4  |
 * +---------------+-------+-------+-------+------+-------+
 */
static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
{
    unsigned int opc, op2, op3, rn, op4;

    opc = extract32(insn, 21, 4);
    op2 = extract32(insn, 16, 5);
    op3 = extract32(insn, 10, 6);
    rn = extract32(insn, 5, 5);
    op4 = extract32(insn, 0, 5);

    if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
        unallocated_encoding(s);
        return;
    }

    switch (opc) {
    case 0: /* BR */
    case 2: /* RET */
        break;
    case 1: /* BLR */
        tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
        break;
    case 4: /* ERET */
    case 5: /* DRPS */
        if (rn != 0x1f) {
            unallocated_encoding(s);
        } else {
            unsupported_encoding(s, insn);
        }
        return;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
    s->is_jmp = DISAS_JUMP;
}
/* C3.2 Branches, exception generating and system instructions */
static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 25, 7)) {
    case 0x0a: case 0x0b:
    case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
        disas_uncond_b_imm(s, insn);
        break;
    case 0x1a: case 0x5a: /* Compare & branch (immediate) */
        disas_comp_b_imm(s, insn);
        break;
    case 0x1b: case 0x5b: /* Test & branch (immediate) */
        disas_test_b_imm(s, insn);
        break;
    case 0x2a: /* Conditional branch (immediate) */
        disas_cond_b_imm(s, insn);
        break;
    case 0x6a: /* Exception generation / System */
        if (insn & (1 << 24)) {
            disas_system(s, insn);
        } else {
            disas_exc(s, insn);
        }
        break;
    case 0x6b: /* Unconditional branch (register) */
        disas_uncond_b_reg(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/*
 * Load/Store exclusive instructions are implemented by remembering
 * the value/address loaded, and seeing if these are the same
 * when the store is performed. This is not actually the architecturally
 * mandated semantics, but it works for typical guest code sequences
 * and avoids having to monitor regular stores.
 *
 * In system emulation mode only one CPU will be running at once, so
 * this sequence is effectively atomic. In user emulation mode we
 * throw an exception and handle the atomic operation elsewhere.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i64 addr, int size, bool is_pair)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGMemOp memop = MO_TE + size;

    g_assert(size <= 3);
    tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);

    if (is_pair) {
        TCGv_i64 addr2 = tcg_temp_new_i64();
        TCGv_i64 hitmp = tcg_temp_new_i64();

        g_assert(size >= 2);
        tcg_gen_addi_i64(addr2, addr, 1 << size);
        tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
        tcg_temp_free_i64(addr2);
        tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
        tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
        tcg_temp_free_i64(hitmp);
    }

    tcg_gen_mov_i64(cpu_exclusive_val, tmp);
    tcg_gen_mov_i64(cpu_reg(s, rt), tmp);

    tcg_temp_free_i64(tmp);
    tcg_gen_mov_i64(cpu_exclusive_addr, addr);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    tcg_gen_mov_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i64 addr, int size, int is_pair)
{
    qemu_log_mask(LOG_UNIMP,
                  "%s:%d: system mode store_exclusive unsupported "
                  "at pc=%016" PRIx64 "\n",
                  __FILE__, __LINE__, s->pc - 4);
}
#endif
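/* The packed cpu_exclusive_info word above keeps size in bits [1:0],
 * is_pair in bit 2 and the rd/rt/rt2 register numbers in bits [8:4],
 * [13:9] and [18:14]; the user-mode EXCP_STREX handler unpacks it to
 * perform the store-exclusive outside of generated code.
 */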
/* C3.3.6 Load/store exclusive
 *
 *  31 30 29         24  23  22   21  20  16  15  14   10 9    5 4    0
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 * | sz  | 0 0 1 0 0 0 | o2 | L | o1 |  Rs  | o0 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------------+----+---+----+------+----+-------+------+------+
 *
 * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
 *  L: 0 -> store, 1 -> load
 * o2: 0 -> exclusive, 1 -> not
 * o1: 0 -> single register, 1 -> register pair
 * o0: 1 -> load-acquire/store-release, 0 -> not
 *
 * o0 == 0 AND o2 == 1 is un-allocated
 * o1 == 1 is un-allocated except for 32 and 64 bit sizes
 */
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int is_lasr = extract32(insn, 15, 1);
    int rs = extract32(insn, 16, 5);
    int is_pair = extract32(insn, 21, 1);
    int is_store = !extract32(insn, 22, 1);
    int is_excl = !extract32(insn, 23, 1);
    int size = extract32(insn, 30, 2);
    TCGv_i64 tcg_addr;

    if ((!is_excl && !is_lasr) ||
        (is_pair && size < 2)) {
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    /* Note that since TCG is single threaded load-acquire/store-release
     * semantics require no extra if (is_lasr) { ... } handling.
     */

    if (is_excl) {
        if (!is_store) {
            gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
        } else {
            gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
        }
        if (is_pair) {
            TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
            tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
            if (is_store) {
                do_gpr_st(s, tcg_rt2, tcg_addr, size);
            } else {
                do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false);
            }
        }
    }
}
/*
 * C3.3.5 Load register (literal)
 *
 *  31 30 29   27  26 25 24 23                5 4     0
 * +-----+-------+---+-----+-------------------+-------+
 * | opc | 0 1 1 | V | 0 0 |     imm19         |  Rt   |
 * +-----+-------+---+-----+-------------------+-------+
 *
 * V: 1 -> vector (simd/fp)
 * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
 *                   10-> 32 bit signed, 11 -> prefetch
 * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
 */
static void disas_ld_lit(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int64_t imm = sextract32(insn, 5, 19) << 2;
    bool is_vector = extract32(insn, 26, 1);
    int opc = extract32(insn, 30, 2);
    bool is_signed = false;
    int size = 2;
    TCGv_i64 tcg_rt, tcg_addr;

    if (is_vector) {
        if (opc == 3) {
            unallocated_encoding(s);
            return;
        }
        size = 2 + opc;
    } else {
        if (opc == 3) {
            /* PRFM (literal) : prefetch */
            return;
        }
        size = 2 + extract32(opc, 0, 1);
        is_signed = extract32(opc, 1, 1);
    }

    tcg_rt = cpu_reg(s, rt);

    tcg_addr = tcg_const_i64((s->pc - 4) + imm);
    if (is_vector) {
        do_fp_ld(s, rt, tcg_addr, size);
    } else {
        do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
    }
    tcg_temp_free_i64(tcg_addr);
}
/*
 * C5.6.80 LDNP (Load Pair - non-temporal hint)
 * C5.6.81 LDP (Load Pair - non vector)
 * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
 * C5.6.176 STNP (Store Pair - non-temporal hint)
 * C5.6.177 STP (Store Pair - non vector)
 * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
 * C6.3.165 LDP (Load Pair of SIMD&FP)
 * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
 * C6.3.284 STP (Store Pair of SIMD&FP)
 *
 *  31 30 29   27  26  25 24   23  22 21   15 14   10 9    5 4    0
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 * | opc | 1 0 1 | V | 0 | index | L |  imm7 |  Rt2  |  Rn  |  Rt  |
 * +-----+-------+---+---+-------+---+-------+-------+------+------+
 *
 * opc: LDP/STP/LDNP/STNP        00 -> 32 bit, 10 -> 64 bit
 *      LDPSW                    01
 *      LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
 *   V: 0 -> GPR, 1 -> Vector
 * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
 *      10 -> signed offset, 11 -> pre-index
 *   L: 0 -> Store 1 -> Load
 *
 * Rt, Rt2 = GPR or SIMD registers to be stored
 * Rn = general purpose register containing address
 * imm7 = signed offset (multiple of 4 or 8 depending on size)
 */
static void disas_ldst_pair(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int rt2 = extract32(insn, 10, 5);
    int64_t offset = sextract32(insn, 15, 7);
    int index = extract32(insn, 23, 2);
    bool is_vector = extract32(insn, 26, 1);
    bool is_load = extract32(insn, 22, 1);
    int opc = extract32(insn, 30, 2);

    bool is_signed = false;
    bool postindex = false;
    bool wback = false;

    TCGv_i64 tcg_addr; /* calculated address */
    int size;

    if (opc == 3) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size = 2 + opc;
    } else {
        size = 2 + extract32(opc, 1, 1);
        is_signed = extract32(opc, 0, 1);
        if (!is_load && is_signed) {
            unallocated_encoding(s);
            return;
        }
    }

    switch (index) {
    case 1: /* post-index */
        postindex = true;
        wback = true;
        break;
    case 0:
        /* signed offset with "non-temporal" hint. Since we don't emulate
         * caches we don't care about hints to the cache system about
         * data access patterns, and handle this identically to plain
         * signed offset.
         */
        if (is_signed) {
            /* There is no non-temporal-hint version of LDPSW */
            unallocated_encoding(s);
            return;
        }
        postindex = false;
        break;
    case 2: /* signed offset, rn not updated */
        postindex = false;
        break;
    case 3: /* pre-index */
        postindex = false;
        wback = true;
        break;
    }

    offset <<= size;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!postindex) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
    }

    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt, tcg_addr, size);
        } else {
            do_fp_st(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_load) {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        }
    }
    tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
    if (is_vector) {
        if (is_load) {
            do_fp_ld(s, rt2, tcg_addr, size);
        } else {
            do_fp_st(s, rt2, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
        if (is_load) {
            do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false);
        } else {
            do_gpr_st(s, tcg_rt2, tcg_addr, size);
        }
    }

    if (wback) {
        if (postindex) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
        } else {
            tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
        }
        tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
    }
}
/*
 * C3.3.8 Load/store (immediate post-indexed)
 * C3.3.9 Load/store (immediate pre-indexed)
 * C3.3.12 Load/store (unscaled immediate)
 *
 *  31 30 29   27  26 25 24 23 22 21  20    12 11 10 9    5 4    0
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 * |size| 1 1 1 | V | 0 0 | opc | 0 |  imm9  | idx |  Rn  |  Rt  |
 * +----+-------+---+-----+-----+---+--------+-----+------+------+
 *
 * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
 * V = 0 -> non-vector
 * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
 * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 */
static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int imm9 = sextract32(insn, 12, 9);
    int opc = extract32(insn, 22, 2);
    int size = extract32(insn, 30, 2);
    int idx = extract32(insn, 10, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);
    bool post_index;
    bool writeback;

    TCGv_i64 tcg_addr;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = ((opc & 1) == 0);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = opc & (1 << 1);
        is_extended = (size < 3) && (opc & 1);
    }

    switch (idx) {
    case 0:
        post_index = false;
        writeback = false;
        break;
    case 1:
        post_index = true;
        writeback = true;
        break;
    case 3:
        post_index = false;
        writeback = true;
        break;
    case 2:
        g_assert(false);
        break;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    if (!post_index) {
        tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
    }

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }

    if (writeback) {
        TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
        if (post_index) {
            tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
        }
        tcg_gen_mov_i64(tcg_rn, tcg_addr);
    }
}
/*
 * C3.3.10 Load/store (register offset)
 *
 *  31 30 29   27  26 25 24 23 22 21  20  16 15 13 12 11 10 9  5 4  0
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 * |size| 1 1 1 | V | 0 0 | opc | 1 |  Rm  | opt | S| 1 0 | Rn | Rt |
 * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * V: 1 -> vector/simd
 * opt: extend encoding (see DecodeRegExtend)
 * S: if S=1 then scale (essentially index by sizeof(size))
 * Rt: register to transfer into/out of
 * Rn: address register or SP for base
 * Rm: offset register or ZR for offset
 */
static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int shift = extract32(insn, 12, 1);
    int rm = extract32(insn, 16, 5);
    int opc = extract32(insn, 22, 2);
    int opt = extract32(insn, 13, 3);
    int size = extract32(insn, 30, 2);
    bool is_signed = false;
    bool is_store = false;
    bool is_extended = false;
    bool is_vector = extract32(insn, 26, 1);

    TCGv_i64 tcg_rm;
    TCGv_i64 tcg_addr;

    if (extract32(opt, 1, 1) == 0) {
        unallocated_encoding(s);
        return;
    }

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);

    tcg_rm = read_cpu_reg(s, rm, 1);
    ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);

    tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
/*
 * C3.3.13 Load/store (unsigned immediate)
 *
 *  31 30 29   27  26 25 24 23 22 21        10 9     5 4    0
 * +----+-------+---+-----+-----+------------+-------+------+
 * |size| 1 1 1 | V | 0 1 | opc |   imm12    |  Rn   |  Rt  |
 * +----+-------+---+-----+-----+------------+-------+------+
 *
 * For non-vector:
 *   size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
 *   opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
 * For vector:
 *   size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
 *   opc<0>: 0 -> store, 1 -> load
 * Rn: base address register (inc SP)
 * Rt: target register
 */
static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    unsigned int imm12 = extract32(insn, 10, 12);
    bool is_vector = extract32(insn, 26, 1);
    int size = extract32(insn, 30, 2);
    int opc = extract32(insn, 22, 2);
    unsigned int offset;

    TCGv_i64 tcg_addr;

    bool is_store;
    bool is_signed = false;
    bool is_extended = false;

    if (is_vector) {
        size |= (opc & 2) << 1;
        if (size > 4) {
            unallocated_encoding(s);
            return;
        }
        is_store = !extract32(opc, 0, 1);
    } else {
        if (size == 3 && opc == 2) {
            /* PRFM - prefetch */
            return;
        }
        if (opc == 3 && size > 1) {
            unallocated_encoding(s);
            return;
        }
        is_store = (opc == 0);
        is_signed = extract32(opc, 1, 1);
        is_extended = (size < 3) && extract32(opc, 0, 1);
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }
    tcg_addr = read_cpu_reg_sp(s, rn, 1);
    offset = imm12 << size;
    tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);

    if (is_vector) {
        if (is_store) {
            do_fp_st(s, rt, tcg_addr, size);
        } else {
            do_fp_ld(s, rt, tcg_addr, size);
        }
    } else {
        TCGv_i64 tcg_rt = cpu_reg(s, rt);
        if (is_store) {
            do_gpr_st(s, tcg_rt, tcg_addr, size);
        } else {
            do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended);
        }
    }
}
/* Load/store register (immediate forms) */
static void disas_ldst_reg_imm(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 10, 2)) {
    case 0: case 1: case 3:
        /* Load/store register (unscaled immediate) */
        /* Load/store immediate pre/post-indexed */
        disas_ldst_reg_imm9(s, insn);
        break;
    case 2:
        /* Load/store register unprivileged */
        unsupported_encoding(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 2)) {
    case 0:
        if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
            disas_ldst_reg_roffset(s, insn);
        } else {
            disas_ldst_reg_imm(s, insn);
        }
        break;
    case 1:
        disas_ldst_reg_unsigned_imm(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.3.1 AdvSIMD load/store multiple structures
 *
 *  31  30  29           23 22  21         16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+-------------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+-------------+--------+------+------+------+
 *
 * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
 *
 *  31  30  29           23 22  21  20     16 15    12 11  10 9    5 4    0
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 |   Rm    | opcode | size |  Rn  |  Rt  |
 * +---+---+---------------+---+---+---------+--------+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 */
static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int opcode = extract32(insn, 12, 4);
    bool is_store = !extract32(insn, 22, 1);
    bool is_postidx = extract32(insn, 23, 1);
    bool is_q = extract32(insn, 30, 1);
    TCGv_i64 tcg_addr, tcg_rn;

    int ebytes = 1 << size;
    int elements = (is_q ? 128 : 64) / (8 << size);
    int rpt;    /* num iterations */
    int selem;  /* structure elements */
    int r;

    if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
        unallocated_encoding(s);
        return;
    }

    /* From the shared decode logic */
    switch (opcode) {
    case 0x0:
        rpt = 1;
        selem = 4;
        break;
    case 0x2:
        rpt = 4;
        selem = 1;
        break;
    case 0x4:
        rpt = 1;
        selem = 3;
        break;
    case 0x6:
        rpt = 3;
        selem = 1;
        break;
    case 0x7:
        rpt = 1;
        selem = 1;
        break;
    case 0x8:
        rpt = 1;
        selem = 2;
        break;
    case 0xa:
        rpt = 2;
        selem = 1;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    if (size == 3 && !is_q && selem != 1) {
        /* reserved */
        unallocated_encoding(s);
        return;
    }

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (r = 0; r < rpt; r++) {
        int e;
        for (e = 0; e < elements; e++) {
            int tt = (rt + r) % 32;
            int xs;
            for (xs = 0; xs < selem; xs++) {
                if (is_store) {
                    do_vec_st(s, tt, e, tcg_addr, size);
                } else {
                    do_vec_ld(s, tt, e, tcg_addr, size);

                    /* For non-quad operations, setting a slice of the low
                     * 64 bits of the register clears the high 64 bits (in
                     * the ARM ARM pseudocode this is implicit in the fact
                     * that 'rval' is a 64 bit wide variable). We optimize
                     * by noticing that we only need to do this the first
                     * time we touch a register.
                     */
                    if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
                        clear_vec_high(s, tt);
                    }
                }
                tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
                tt = (tt + 1) % 32;
            }
        }
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
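/* For example, "LD4 {V0.16B-V3.16B}, [X0]" is opcode 0x0 (rpt = 1,
 * selem = 4): 16 elements, each loaded into the same element index of
 * four consecutive registers, 64 bytes in total.
 */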
/* C3.3.3 AdvSIMD load/store single structure
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * C3.3.4 AdvSIMD load/store single structure (post-indexed)
 *
 *  31  30  29           23 22 21 20       16 15 13 12  11 10 9    5 4    0
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 * | 0 | Q | 0 0 1 1 0 1 1 | L R |     Rm    | opc | S | size |  Rn  |  Rt  |
 * +---+---+---------------+-----+-----------+-----+---+------+------+------+
 *
 * Rt: first (or only) SIMD&FP register to be transferred
 * Rn: base address or SP
 * Rm (post-index only): post-index register (when !31) or size dependent #imm
 * index = encoded in Q:S:size dependent on size
 *
 * lane_size = encoded in R, opc
 * transfer width = encoded in opc, S, size
 */
static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
{
    int rt = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    int size = extract32(insn, 10, 2);
    int S = extract32(insn, 12, 1);
    int opc = extract32(insn, 13, 3);
    int R = extract32(insn, 21, 1);
    int is_load = extract32(insn, 22, 1);
    int is_postidx = extract32(insn, 23, 1);
    int is_q = extract32(insn, 30, 1);

    int scale = extract32(opc, 1, 2);
    int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
    bool replicate = false;
    int index = is_q << 3 | S << 2 | size;
    int ebytes, xs;
    TCGv_i64 tcg_addr, tcg_rn;

    switch (scale) {
    case 3:
        if (!is_load || S) {
            unallocated_encoding(s);
            return;
        }
        scale = size;
        replicate = true;
        break;
    case 0:
        break;
    case 1:
        if (extract32(size, 0, 1)) {
            unallocated_encoding(s);
            return;
        }
        index >>= 1;
        break;
    case 2:
        if (extract32(size, 1, 1)) {
            unallocated_encoding(s);
            return;
        }
        if (!extract32(size, 0, 1)) {
            index >>= 2;
        } else {
            if (S) {
                unallocated_encoding(s);
                return;
            }
            index >>= 3;
            scale = 3;
        }
        break;
    default:
        g_assert_not_reached();
    }

    ebytes = 1 << scale;

    if (rn == 31) {
        gen_check_sp_alignment(s);
    }

    tcg_rn = cpu_reg_sp(s, rn);
    tcg_addr = tcg_temp_new_i64();
    tcg_gen_mov_i64(tcg_addr, tcg_rn);

    for (xs = 0; xs < selem; xs++) {
        if (replicate) {
            /* Load and replicate to all elements */
            uint64_t mulconst;
            TCGv_i64 tcg_tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
                                get_mem_index(s), MO_TE + scale);
            switch (scale) {
            case 0:
                mulconst = 0x0101010101010101ULL;
                break;
            case 1:
                mulconst = 0x0001000100010001ULL;
                break;
            case 2:
                mulconst = 0x0000000100000001ULL;
                break;
            case 3:
                mulconst = 0;
                break;
            default:
                g_assert_not_reached();
            }
            if (mulconst) {
                tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
            }
            write_vec_element(s, tcg_tmp, rt, 0, MO_64);
            if (is_q) {
                write_vec_element(s, tcg_tmp, rt, 1, MO_64);
            } else {
                clear_vec_high(s, rt);
            }
            tcg_temp_free_i64(tcg_tmp);
        } else {
            /* Load/store one element per register */
            if (is_load) {
                do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale);
            } else {
                do_vec_st(s, rt, index, tcg_addr, MO_TE + scale);
            }
        }
        tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
        rt = (rt + 1) % 32;
    }

    if (is_postidx) {
        int rm = extract32(insn, 16, 5);
        if (rm == 31) {
            tcg_gen_mov_i64(tcg_rn, tcg_addr);
        } else {
            tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
        }
    }
    tcg_temp_free_i64(tcg_addr);
}
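/* For example, "LD1R {V0.4S}, [X0]" takes the replicate path: one 32 bit
 * element is loaded and multiplied by 0x0000000100000001 to splat it
 * across each 64 bit half of the vector register.
 */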
/* C3.3 Loads and stores */
static void disas_ldst(DisasContext *s, uint32_t insn)
{
    switch (extract32(insn, 24, 6)) {
    case 0x08: /* Load/store exclusive */
        disas_ldst_excl(s, insn);
        break;
    case 0x18: case 0x1c: /* Load register (literal) */
        disas_ld_lit(s, insn);
        break;
    case 0x28: case 0x29:
    case 0x2c: case 0x2d: /* Load/store pair (all forms) */
        disas_ldst_pair(s, insn);
        break;
    case 0x38: case 0x39:
    case 0x3c: case 0x3d: /* Load/store register (all forms) */
        disas_ldst_reg(s, insn);
        break;
    case 0x0c: /* AdvSIMD load/store multiple structures */
        disas_ldst_multiple_struct(s, insn);
        break;
    case 0x0d: /* AdvSIMD load/store single structure */
        disas_ldst_single_struct(s, insn);
        break;
    default:
        unallocated_encoding(s);
        break;
    }
}
/* C3.4.6 PC-rel. addressing
 *   31  30   29 28       24 23                5 4    0
 * +----+-------+-----------+-------------------+------+
 * | op | immlo | 1 0 0 0 0 |       immhi       |  Rd  |
 * +----+-------+-----------+-------------------+------+
 */
static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
{
    unsigned int page, rd;
    uint64_t base;
    int64_t offset;

    page = extract32(insn, 31, 1);
    /* SignExtend(immhi:immlo) -> offset */
    offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
    rd = extract32(insn, 0, 5);
    base = s->pc - 4;

    if (page) {
        /* ADRP (page based) */
        base &= ~0xfff;
        offset <<= 12;
    }

    tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
}
/*
 * C3.4.1 Add/subtract (immediate)
 *
 *  31 30 29 28       24 23 22 21         10 9   5 4   0
 * +--+--+--+-----------+-----+-------------+-----+-----+
 * |sf|op| S| 1 0 0 0 1 |shift|    imm12    |  Rn | Rd  |
 * +--+--+--+-----------+-----+-------------+-----+-----+
 *
 *    sf: 0 -> 32bit, 1 -> 64bit
 *    op: 0 -> add , 1 -> sub
 *     S: 1 -> set flags
 * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
 */
static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
{
    int rd = extract32(insn, 0, 5);
    int rn = extract32(insn, 5, 5);
    uint64_t imm = extract32(insn, 10, 12);
    int shift = extract32(insn, 22, 2);
    bool setflags = extract32(insn, 29, 1);
    bool sub_op = extract32(insn, 30, 1);
    bool is_64bit = extract32(insn, 31, 1);

    TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
    TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
    TCGv_i64 tcg_result;

    switch (shift) {
    case 0x0:
        break;
    case 0x1:
        imm <<= 12;
        break;
    default:
        unallocated_encoding(s);
        return;
    }

    tcg_result = tcg_temp_new_i64();
    if (!setflags) {
        if (sub_op) {
            tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
        } else {
            tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
        }
    } else {
        TCGv_i64 tcg_imm = tcg_const_i64(imm);
        if (sub_op) {
            gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        } else {
            gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
        }
        tcg_temp_free_i64(tcg_imm);
    }

    if (is_64bit) {
        tcg_gen_mov_i64(tcg_rd, tcg_result);
    } else {
        tcg_gen_ext32u_i64(tcg_rd, tcg_result);
    }

    tcg_temp_free_i64(tcg_result);
}
2445 /* The input should be a value in the bottom e bits (with higher
2446 * bits zero); returns that value replicated into every element
2447 * of size e in a 64 bit integer.
2449 static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
2459 /* Return a value with the bottom len bits set (where 0 < len <= 64) */
2460 static inline uint64_t bitmask64(unsigned int length)
2462 assert(length > 0 && length <= 64);
2463 return ~0ULL >> (64 - length);
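/* e.g. bitmask64(8) == 0xff and bitmask64(64) == ~0ULL (example added) */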
2466 /* Simplified variant of pseudocode DecodeBitMasks() for the case where we
2467 * only require the wmask. Returns false if the imms/immr/immn are a reserved
2468 * value (ie should cause a guest UNDEF exception), and true if they are
2469 * valid, in which case the decoded bit pattern is written to result.
2471 static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
2472 unsigned int imms, unsigned int immr)
2475 unsigned e, levels, s, r;
2478 assert(immn < 2 && imms < 64 && immr < 64);
2480 /* The bit patterns we create here are 64 bit patterns which
2481 * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
2482 * 64 bits each. Each element contains the same value: a run
2483 * of between 1 and e-1 non-zero bits, rotated within the
2484 * element by between 0 and e-1 bits.
2486 * The element size and run length are encoded into immn (1 bit)
2487 * and imms (6 bits) as follows:
2488 * 64 bit elements: immn = 1, imms = <length of run - 1>
2489 * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
2490 * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
2491 * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
2492 * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
2493 * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
2494 * Notice that immn = 0, imms = 11111x is the only combination
2495 * not covered by one of the above options; this is reserved.
2496 * Further, <length of run - 1> all-ones is a reserved pattern.
2498 * In all cases the rotation is by immr % e (and immr is 6 bits).
2501 /* First determine the element size */
2502 len = 31 - clz32((immn << 6) | (~imms & 0x3f));
2504 /* This is the immn == 0, imms == 11111x case */
2514 /* <length of run - 1> mustn't be all-ones. */
2518 /* Create the value of one element: s+1 set bits rotated
2519 * by r within the element (which is e bits wide)...
2521 mask = bitmask64(s + 1);
2522 mask = ((mask >> r) | (mask << ((e - r) & 63))) & bitmask64(e); /* the '& 63' and the final mask keep the rotate within the e-bit element and avoid an undefined shift by 64 when r == 0 */
2523 /* ...then replicate the element over the whole 64 bit value */
2524 mask = bitfield_replicate(mask, e);
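/* Worked example (comment added, not in the original):
 * immn=0, imms=0b000001, immr=1 gives e=32, s=1, r=1:
 *   run of s+1 = 2 set bits:           mask = 0x3
 *   rotated right by 1 within 32 bits: mask = 0x80000001
 *   replicated over 64 bits:           mask = 0x8000000180000001
 */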
2529 /* C3.4.4 Logical (immediate)
2530 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
2531 * +----+-----+-------------+---+------+------+------+------+
2532 * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
2533 * +----+-----+-------------+---+------+------+------+------+
2535 static void disas_logic_imm(DisasContext *s, uint32_t insn)
2537 unsigned int sf, opc, is_n, immr, imms, rn, rd;
2538 TCGv_i64 tcg_rd, tcg_rn;
2540 bool is_and = false;
2542 sf = extract32(insn, 31, 1);
2543 opc = extract32(insn, 29, 2);
2544 is_n = extract32(insn, 22, 1);
2545 immr = extract32(insn, 16, 6);
2546 imms = extract32(insn, 10, 6);
2547 rn = extract32(insn, 5, 5);
2548 rd = extract32(insn, 0, 5);
2551 unallocated_encoding(s);
2555 if (opc == 0x3) { /* ANDS */
2556 tcg_rd = cpu_reg(s, rd);
2558 tcg_rd = cpu_reg_sp(s, rd);
2560 tcg_rn = cpu_reg(s, rn);
2562 if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
2563 /* some immediate field values are reserved */
2564 unallocated_encoding(s);
2569 wmask &= 0xffffffff;
2573 case 0x3: /* ANDS */
2575 tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
2579 tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
2582 tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
2585 assert(FALSE); /* must handle all above */
2589 if (!sf && !is_and) {
2590 /* zero extend final result; we know we can skip this for AND
2591 * since the immediate had the high 32 bits clear.
2593 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2596 if (opc == 3) { /* ANDS */
2597 gen_logic_CC(sf, tcg_rd);
2602 * C3.4.5 Move wide (immediate)
2604 * 31 30 29 28 23 22 21 20 5 4 0
2605 * +--+-----+-------------+-----+----------------+------+
2606 * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
2607 * +--+-----+-------------+-----+----------------+------+
2609 * sf: 0 -> 32 bit, 1 -> 64 bit
2610 * opc: 00 -> N, 10 -> Z, 11 -> K
2611 * hw: shift/16 (0 or 16; 32 and 48 are valid only when sf=1)
2613 static void disas_movw_imm(DisasContext *s, uint32_t insn)
2615 int rd = extract32(insn, 0, 5);
2616 uint64_t imm = extract32(insn, 5, 16);
2617 int sf = extract32(insn, 31, 1);
2618 int opc = extract32(insn, 29, 2);
2619 int pos = extract32(insn, 21, 2) << 4;
2620 TCGv_i64 tcg_rd = cpu_reg(s, rd);
2623 if (!sf && (pos >= 32)) {
2624 unallocated_encoding(s);
2638 tcg_gen_movi_i64(tcg_rd, imm);
2641 tcg_imm = tcg_const_i64(imm);
2642 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
2643 tcg_temp_free_i64(tcg_imm);
2645 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2649 unallocated_encoding(s);
2655 * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
2656 * +----+-----+-------------+---+------+------+------+------+
2657 * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
2658 * +----+-----+-------------+---+------+------+------+------+
2660 static void disas_bitfield(DisasContext *s, uint32_t insn)
2662 unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
2663 TCGv_i64 tcg_rd, tcg_tmp;
2665 sf = extract32(insn, 31, 1);
2666 opc = extract32(insn, 29, 2);
2667 n = extract32(insn, 22, 1);
2668 ri = extract32(insn, 16, 6);
2669 si = extract32(insn, 10, 6);
2670 rn = extract32(insn, 5, 5);
2671 rd = extract32(insn, 0, 5);
2672 bitsize = sf ? 64 : 32;
2674 if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
2675 unallocated_encoding(s);
2679 tcg_rd = cpu_reg(s, rd);
2680 tcg_tmp = read_cpu_reg(s, rn, sf);
2682 /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */
2684 if (opc != 1) { /* SBFM or UBFM */
2685 tcg_gen_movi_i64(tcg_rd, 0);
2688 /* do the bit move operation */
2690 /* Wd<s-r:0> = Wn<s:r> */
2691 tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
2693 len = (si - ri) + 1;
2695 /* Wd<32+s-r:32-r> = Wn<s:0> */
2700 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
2702 if (opc == 0) { /* SBFM - sign extend the destination field */
2703 tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
2704 tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
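/* Example (added): SXTB is SBFM with ri=0, si=7, giving pos=0 and
 * len=8, so the shl/sar pair above shifts by 56 and sign-extends
 * the low byte into the full register.
 */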
2707 if (!sf) { /* zero extend final result */
2708 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2713 * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
2714 * +----+------+-------------+---+----+------+--------+------+------+
2715 * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
2716 * +----+------+-------------+---+----+------+--------+------+------+
2718 static void disas_extract(DisasContext *s, uint32_t insn)
2720 unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
2722 sf = extract32(insn, 31, 1);
2723 n = extract32(insn, 22, 1);
2724 rm = extract32(insn, 16, 5);
2725 imm = extract32(insn, 10, 6);
2726 rn = extract32(insn, 5, 5);
2727 rd = extract32(insn, 0, 5);
2728 op21 = extract32(insn, 29, 2);
2729 op0 = extract32(insn, 21, 1);
2730 bitsize = sf ? 64 : 32;
2732 if (sf != n || op21 || op0 || imm >= bitsize) {
2733 unallocated_encoding(s);
2735 TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
2737 tcg_rd = cpu_reg(s, rd);
2740 /* OPTME: we can special case rm==rn as a rotate */
2741 tcg_rm = read_cpu_reg(s, rm, sf);
2742 tcg_rn = read_cpu_reg(s, rn, sf);
2743 tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
2744 tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
2745 tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
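/* Added note: this computes Rd = (Rn:Rm) >> imm at the operand width;
 * e.g. a 32-bit EXTR with imm=8 gives Rd = Rn<7:0> : Rm<31:8>.
 */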
2747 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2750 /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
2751 * so an extract from bit 0 is a special case.
2754 tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
2756 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
2763 /* C3.4 Data processing - immediate */
2764 static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
2766 switch (extract32(insn, 23, 6)) {
2767 case 0x20: case 0x21: /* PC-rel. addressing */
2768 disas_pc_rel_adr(s, insn);
2770 case 0x22: case 0x23: /* Add/subtract (immediate) */
2771 disas_add_sub_imm(s, insn);
2773 case 0x24: /* Logical (immediate) */
2774 disas_logic_imm(s, insn);
2776 case 0x25: /* Move wide (immediate) */
2777 disas_movw_imm(s, insn);
2779 case 0x26: /* Bitfield */
2780 disas_bitfield(s, insn);
2782 case 0x27: /* Extract */
2783 disas_extract(s, insn);
2786 unallocated_encoding(s);
2791 /* Shift a TCGv src by TCGv shift_amount, put result in dst.
2792 * Note that it is the caller's responsibility to ensure that the
2793 * shift amount is in range (i.e. 0..31 or 0..63) and to provide the
2794 * ARM-mandated semantics for out-of-range shifts.
2796 static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
2797 enum a64_shift_type shift_type, TCGv_i64 shift_amount)
2799 switch (shift_type) {
2800 case A64_SHIFT_TYPE_LSL:
2801 tcg_gen_shl_i64(dst, src, shift_amount);
2803 case A64_SHIFT_TYPE_LSR:
2804 tcg_gen_shr_i64(dst, src, shift_amount);
2806 case A64_SHIFT_TYPE_ASR:
2808 tcg_gen_ext32s_i64(dst, src);
2810 tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
2812 case A64_SHIFT_TYPE_ROR:
2814 tcg_gen_rotr_i64(dst, src, shift_amount);
2817 t0 = tcg_temp_new_i32();
2818 t1 = tcg_temp_new_i32();
2819 tcg_gen_trunc_i64_i32(t0, src);
2820 tcg_gen_trunc_i64_i32(t1, shift_amount);
2821 tcg_gen_rotr_i32(t0, t0, t1);
2822 tcg_gen_extu_i32_i64(dst, t0);
2823 tcg_temp_free_i32(t0);
2824 tcg_temp_free_i32(t1);
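/* Added note: the 32-bit rotate must be done at i32 width so that
 * bits rotated out at bit 0 re-enter at bit 31 rather than bit 63.
 */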
2828 assert(FALSE); /* all shift types should be handled */
2832 if (!sf) { /* zero extend final result */
2833 tcg_gen_ext32u_i64(dst, dst);
2837 /* Shift a TCGv src by immediate, put result in dst.
2838 * The shift amount must be in range (this should always be true as the
2839 * relevant instructions will UNDEF on bad shift immediates).
2841 static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
2842 enum a64_shift_type shift_type, unsigned int shift_i)
2844 assert(shift_i < (sf ? 64 : 32));
2847 tcg_gen_mov_i64(dst, src);
2849 TCGv_i64 shift_const;
2851 shift_const = tcg_const_i64(shift_i);
2852 shift_reg(dst, src, sf, shift_type, shift_const);
2853 tcg_temp_free_i64(shift_const);
2857 /* C3.5.10 Logical (shifted register)
2858 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
2859 * +----+-----+-----------+-------+---+------+--------+------+------+
2860 * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
2861 * +----+-----+-----------+-------+---+------+--------+------+------+
2863 static void disas_logic_reg(DisasContext *s, uint32_t insn)
2865 TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
2866 unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
2868 sf = extract32(insn, 31, 1);
2869 opc = extract32(insn, 29, 2);
2870 shift_type = extract32(insn, 22, 2);
2871 invert = extract32(insn, 21, 1);
2872 rm = extract32(insn, 16, 5);
2873 shift_amount = extract32(insn, 10, 6);
2874 rn = extract32(insn, 5, 5);
2875 rd = extract32(insn, 0, 5);
2877 if (!sf && (shift_amount & (1 << 5))) {
2878 unallocated_encoding(s);
2882 tcg_rd = cpu_reg(s, rd);
2884 if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
2885 /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
2886 * register-register MOV and MVN, so it is worth special casing.
2888 tcg_rm = cpu_reg(s, rm);
2890 tcg_gen_not_i64(tcg_rd, tcg_rm);
2892 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2896 tcg_gen_mov_i64(tcg_rd, tcg_rm);
2898 tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
2904 tcg_rm = read_cpu_reg(s, rm, sf);
2907 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
2910 tcg_rn = cpu_reg(s, rn);
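/* Added summary of the switch below: opc | (invert << 2) selects
 * 0=AND, 1=ORR, 2=EOR, 3=ANDS (AND, flags set at the end),
 * 4=BIC, 5=ORN, 6=EON, 7=BICS (BIC, flags set at the end).
 */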
2912 switch (opc | (invert << 2)) {
2915 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
2918 tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
2921 tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
2925 tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
2928 tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
2931 tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
2939 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
2943 gen_logic_CC(sf, tcg_rd);
2948 * C3.5.1 Add/subtract (extended register)
2950 * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
2951 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
2952 * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
2953 * +--+--+--+-----------+-----+--+-------+------+------+----+----+
2955 * sf: 0 -> 32bit, 1 -> 64bit
2956 * op: 0 -> add , 1 -> sub
2959 * option: extension type (see DecodeRegExtend)
2960 * imm3: optional shift to Rm
2962 * Rd = Rn + LSL(extend(Rm), amount)
2964 static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
2966 int rd = extract32(insn, 0, 5);
2967 int rn = extract32(insn, 5, 5);
2968 int imm3 = extract32(insn, 10, 3);
2969 int option = extract32(insn, 13, 3);
2970 int rm = extract32(insn, 16, 5);
2971 bool setflags = extract32(insn, 29, 1);
2972 bool sub_op = extract32(insn, 30, 1);
2973 bool sf = extract32(insn, 31, 1);
2975 TCGv_i64 tcg_rm, tcg_rn; /* temps */
2977 TCGv_i64 tcg_result;
2980 unallocated_encoding(s);
2984 /* non-flag setting ops may use SP */
2986 tcg_rn = read_cpu_reg_sp(s, rn, sf);
2987 tcg_rd = cpu_reg_sp(s, rd);
2989 tcg_rn = read_cpu_reg(s, rn, sf);
2990 tcg_rd = cpu_reg(s, rd);
2993 tcg_rm = read_cpu_reg(s, rm, sf);
2994 ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
2996 tcg_result = tcg_temp_new_i64();
3000 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3002 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3006 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3008 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3013 tcg_gen_mov_i64(tcg_rd, tcg_result);
3015 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3018 tcg_temp_free_i64(tcg_result);
3022 * C3.5.2 Add/subtract (shifted register)
3024 * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
3025 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3026 * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
3027 * +--+--+--+-----------+-----+--+-------+---------+------+------+
3029 * sf: 0 -> 32bit, 1 -> 64bit
3030 * op: 0 -> add , 1 -> sub
3032 * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
3033 * imm6: Shift amount to apply to Rm before the add/sub
3035 static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
3037 int rd = extract32(insn, 0, 5);
3038 int rn = extract32(insn, 5, 5);
3039 int imm6 = extract32(insn, 10, 6);
3040 int rm = extract32(insn, 16, 5);
3041 int shift_type = extract32(insn, 22, 2);
3042 bool setflags = extract32(insn, 29, 1);
3043 bool sub_op = extract32(insn, 30, 1);
3044 bool sf = extract32(insn, 31, 1);
3046 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3047 TCGv_i64 tcg_rn, tcg_rm;
3048 TCGv_i64 tcg_result;
3050 if ((shift_type == 3) || (!sf && (imm6 > 31))) {
3051 unallocated_encoding(s);
3055 tcg_rn = read_cpu_reg(s, rn, sf);
3056 tcg_rm = read_cpu_reg(s, rm, sf);
3058 shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
3060 tcg_result = tcg_temp_new_i64();
3064 tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
3066 tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
3070 gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
3072 gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
3077 tcg_gen_mov_i64(tcg_rd, tcg_result);
3079 tcg_gen_ext32u_i64(tcg_rd, tcg_result);
3082 tcg_temp_free_i64(tcg_result);
3085 /* C3.5.9 Data-processing (3 source)
3087 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
3088 +--+------+-----------+------+------+----+------+------+------+
3089 |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
3090 +--+------+-----------+------+------+----+------+------+------+
3093 static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
3095 int rd = extract32(insn, 0, 5);
3096 int rn = extract32(insn, 5, 5);
3097 int ra = extract32(insn, 10, 5);
3098 int rm = extract32(insn, 16, 5);
3099 int op_id = (extract32(insn, 29, 3) << 4) |
3100 (extract32(insn, 21, 3) << 1) |
3101 extract32(insn, 15, 1);
3102 bool sf = extract32(insn, 31, 1);
3103 bool is_sub = extract32(op_id, 0, 1);
3104 bool is_high = extract32(op_id, 2, 1);
3105 bool is_signed = false;
3110 /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
3112 case 0x42: /* SMADDL */
3113 case 0x43: /* SMSUBL */
3114 case 0x44: /* SMULH */
3117 case 0x0: /* MADD (32bit) */
3118 case 0x1: /* MSUB (32bit) */
3119 case 0x40: /* MADD (64bit) */
3120 case 0x41: /* MSUB (64bit) */
3121 case 0x4a: /* UMADDL */
3122 case 0x4b: /* UMSUBL */
3123 case 0x4c: /* UMULH */
3126 unallocated_encoding(s);
3131 TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
3132 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3133 TCGv_i64 tcg_rn = cpu_reg(s, rn);
3134 TCGv_i64 tcg_rm = cpu_reg(s, rm);
3137 tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3139 tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
3142 tcg_temp_free_i64(low_bits);
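/* Added note: muls2/mulu2 compute the full 128-bit product in the
 * (low_bits, tcg_rd) pair; keeping only the high 64 bits implements
 * SMULH/UMULH.
 */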
3146 tcg_op1 = tcg_temp_new_i64();
3147 tcg_op2 = tcg_temp_new_i64();
3148 tcg_tmp = tcg_temp_new_i64();
3151 tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
3152 tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
3155 tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
3156 tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
3158 tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
3159 tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
3163 if (ra == 31 && !is_sub) {
3164 /* Special-case MADD with rA == XZR; it is the standard MUL alias */
3165 tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
3167 tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
3169 tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3171 tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
3176 tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
3179 tcg_temp_free_i64(tcg_op1);
3180 tcg_temp_free_i64(tcg_op2);
3181 tcg_temp_free_i64(tcg_tmp);
3184 /* C3.5.3 - Add/subtract (with carry)
3185 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
3186 * +--+--+--+------------------------+------+---------+------+-----+
3187 * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
3188 * +--+--+--+------------------------+------+---------+------+-----+
3192 static void disas_adc_sbc(DisasContext *s, uint32_t insn)
3194 unsigned int sf, op, setflags, rm, rn, rd;
3195 TCGv_i64 tcg_y, tcg_rn, tcg_rd;
3197 if (extract32(insn, 10, 6) != 0) {
3198 unallocated_encoding(s);
3202 sf = extract32(insn, 31, 1);
3203 op = extract32(insn, 30, 1);
3204 setflags = extract32(insn, 29, 1);
3205 rm = extract32(insn, 16, 5);
3206 rn = extract32(insn, 5, 5);
3207 rd = extract32(insn, 0, 5);
3209 tcg_rd = cpu_reg(s, rd);
3210 tcg_rn = cpu_reg(s, rn);
3213 tcg_y = new_tmp_a64(s);
3214 tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
3216 tcg_y = cpu_reg(s, rm);
3220 gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
3222 gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
3226 /* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
3227 * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
3228 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3229 * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
3230 * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
3233 static void disas_cc(DisasContext *s, uint32_t insn)
3235 unsigned int sf, op, y, cond, rn, nzcv, is_imm;
3236 int label_continue = -1;
3237 TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
3239 if (!extract32(insn, 29, 1)) {
3240 unallocated_encoding(s);
3243 if (insn & (1 << 10 | 1 << 4)) {
3244 unallocated_encoding(s);
3247 sf = extract32(insn, 31, 1);
3248 op = extract32(insn, 30, 1);
3249 is_imm = extract32(insn, 11, 1);
3250 y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
3251 cond = extract32(insn, 12, 4);
3252 rn = extract32(insn, 5, 5);
3253 nzcv = extract32(insn, 0, 4);
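/* Added note on CCMP/CCMN semantics: if the condition holds, the
 * flags are set from comparing Rn with the second operand; otherwise
 * they are loaded directly from the nzcv immediate, as coded below.
 */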
3255 if (cond < 0x0e) { /* not always */
3256 int label_match = gen_new_label();
3257 label_continue = gen_new_label();
3258 arm_gen_test_cc(cond, label_match);
3260 tcg_tmp = tcg_temp_new_i64();
3261 tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
3262 gen_set_nzcv(tcg_tmp);
3263 tcg_temp_free_i64(tcg_tmp);
3264 tcg_gen_br(label_continue);
3265 gen_set_label(label_match);
3267 /* match, or condition is always */
3269 tcg_y = new_tmp_a64(s);
3270 tcg_gen_movi_i64(tcg_y, y);
3272 tcg_y = cpu_reg(s, y);
3274 tcg_rn = cpu_reg(s, rn);
3276 tcg_tmp = tcg_temp_new_i64();
3278 gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3280 gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
3282 tcg_temp_free_i64(tcg_tmp);
3284 if (cond < 0x0e) { /* continue */
3285 gen_set_label(label_continue);
3289 /* C3.5.6 Conditional select
3290 * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
3291 * +----+----+---+-----------------+------+------+-----+------+------+
3292 * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
3293 * +----+----+---+-----------------+------+------+-----+------+------+
3295 static void disas_cond_select(DisasContext *s, uint32_t insn)
3297 unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
3298 TCGv_i64 tcg_rd, tcg_src;
3300 if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
3301 /* S == 1 or op2<1> == 1 */
3302 unallocated_encoding(s);
3305 sf = extract32(insn, 31, 1);
3306 else_inv = extract32(insn, 30, 1);
3307 rm = extract32(insn, 16, 5);
3308 cond = extract32(insn, 12, 4);
3309 else_inc = extract32(insn, 10, 1);
3310 rn = extract32(insn, 5, 5);
3311 rd = extract32(insn, 0, 5);
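/* Added note: else_inv/else_inc select the alias: CSEL (0/0),
 * CSINC (0/1), CSINV (1/0), CSNEG (1/1); the inversion/increment
 * applies to Rm when the condition fails.
 */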
3314 /* silly no-op write; until we use movcond we must special-case
3315 * this to avoid a dead temporary across basic blocks.
3320 tcg_rd = cpu_reg(s, rd);
3322 if (cond >= 0x0e) { /* condition "always" */
3323 tcg_src = read_cpu_reg(s, rn, sf);
3324 tcg_gen_mov_i64(tcg_rd, tcg_src);
3326 /* OPTME: we could use movcond here, at the cost of duplicating
3327 * a lot of the arm_gen_test_cc() logic.
3329 int label_match = gen_new_label();
3330 int label_continue = gen_new_label();
3332 arm_gen_test_cc(cond, label_match);
3334 tcg_src = cpu_reg(s, rm);
3336 if (else_inv && else_inc) {
3337 tcg_gen_neg_i64(tcg_rd, tcg_src);
3338 } else if (else_inv) {
3339 tcg_gen_not_i64(tcg_rd, tcg_src);
3340 } else if (else_inc) {
3341 tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
3343 tcg_gen_mov_i64(tcg_rd, tcg_src);
3346 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3348 tcg_gen_br(label_continue);
3350 gen_set_label(label_match);
3351 tcg_src = read_cpu_reg(s, rn, sf);
3352 tcg_gen_mov_i64(tcg_rd, tcg_src);
3354 gen_set_label(label_continue);
3358 static void handle_clz(DisasContext *s, unsigned int sf,
3359 unsigned int rn, unsigned int rd)
3361 TCGv_i64 tcg_rd, tcg_rn;
3362 tcg_rd = cpu_reg(s, rd);
3363 tcg_rn = cpu_reg(s, rn);
3366 gen_helper_clz64(tcg_rd, tcg_rn);
3368 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3369 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
3370 gen_helper_clz(tcg_tmp32, tcg_tmp32);
3371 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3372 tcg_temp_free_i32(tcg_tmp32);
3376 static void handle_cls(DisasContext *s, unsigned int sf,
3377 unsigned int rn, unsigned int rd)
3379 TCGv_i64 tcg_rd, tcg_rn;
3380 tcg_rd = cpu_reg(s, rd);
3381 tcg_rn = cpu_reg(s, rn);
3384 gen_helper_cls64(tcg_rd, tcg_rn);
3386 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3387 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
3388 gen_helper_cls32(tcg_tmp32, tcg_tmp32);
3389 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3390 tcg_temp_free_i32(tcg_tmp32);
3394 static void handle_rbit(DisasContext *s, unsigned int sf,
3395 unsigned int rn, unsigned int rd)
3397 TCGv_i64 tcg_rd, tcg_rn;
3398 tcg_rd = cpu_reg(s, rd);
3399 tcg_rn = cpu_reg(s, rn);
3402 gen_helper_rbit64(tcg_rd, tcg_rn);
3404 TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
3405 tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
3406 gen_helper_rbit(tcg_tmp32, tcg_tmp32);
3407 tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
3408 tcg_temp_free_i32(tcg_tmp32);
3412 /* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
3413 static void handle_rev64(DisasContext *s, unsigned int sf,
3414 unsigned int rn, unsigned int rd)
3417 unallocated_encoding(s);
3420 tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
3423 /* C5.6.149 REV with sf==0, opcode==2
3424 * C5.6.151 REV32 (sf==1, opcode==2)
3426 static void handle_rev32(DisasContext *s, unsigned int sf,
3427 unsigned int rn, unsigned int rd)
3429 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3432 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3433 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
3435 /* bswap32_i64 requires zero high word */
3436 tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
3437 tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
3438 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
3439 tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
3440 tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
3442 tcg_temp_free_i64(tcg_tmp);
3444 tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
3445 tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
3449 /* C5.6.150 REV16 (opcode==1) */
3450 static void handle_rev16(DisasContext *s, unsigned int sf,
3451 unsigned int rn, unsigned int rd)
3453 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3454 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
3455 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
3457 tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
3458 tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);
3460 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
3461 tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
3462 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
3463 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);
3466 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
3467 tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
3468 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
3469 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);
3471 tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
3472 tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
3473 tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
3476 tcg_temp_free_i64(tcg_tmp);
3479 /* C3.5.7 Data-processing (1 source)
3480 * 31 30 29 28 21 20 16 15 10 9 5 4 0
3481 * +----+---+---+-----------------+---------+--------+------+------+
3482 * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
3483 * +----+---+---+-----------------+---------+--------+------+------+
3485 static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
3487 unsigned int sf, opcode, rn, rd;
3489 if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
3490 unallocated_encoding(s);
3494 sf = extract32(insn, 31, 1);
3495 opcode = extract32(insn, 10, 6);
3496 rn = extract32(insn, 5, 5);
3497 rd = extract32(insn, 0, 5);
3501 handle_rbit(s, sf, rn, rd);
3504 handle_rev16(s, sf, rn, rd);
3507 handle_rev32(s, sf, rn, rd);
3510 handle_rev64(s, sf, rn, rd);
3513 handle_clz(s, sf, rn, rd);
3516 handle_cls(s, sf, rn, rd);
3521 static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
3522 unsigned int rm, unsigned int rn, unsigned int rd)
3524 TCGv_i64 tcg_n, tcg_m, tcg_rd;
3525 tcg_rd = cpu_reg(s, rd);
3527 if (!sf && is_signed) {
3528 tcg_n = new_tmp_a64(s);
3529 tcg_m = new_tmp_a64(s);
3530 tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
3531 tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
3533 tcg_n = read_cpu_reg(s, rn, sf);
3534 tcg_m = read_cpu_reg(s, rm, sf);
3538 gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
3540 gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
3543 if (!sf) { /* zero extend final result */
3544 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
3548 /* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
3549 static void handle_shift_reg(DisasContext *s,
3550 enum a64_shift_type shift_type, unsigned int sf,
3551 unsigned int rm, unsigned int rn, unsigned int rd)
3553 TCGv_i64 tcg_shift = tcg_temp_new_i64();
3554 TCGv_i64 tcg_rd = cpu_reg(s, rd);
3555 TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
3557 tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
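/* Added note: the shift amount is Rm taken modulo the data size
 * (32 or 64), as the architecture specifies for these insns, so
 * shift_reg() never sees an out-of-range value.
 */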
3558 shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
3559 tcg_temp_free_i64(tcg_shift);
3562 /* C3.5.8 Data-processing (2 source)
3563 * 31 30 29 28 21 20 16 15 10 9 5 4 0
3564 * +----+---+---+-----------------+------+--------+------+------+
3565 * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
3566 * +----+---+---+-----------------+------+--------+------+------+
3568 static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
3570 unsigned int sf, rm, opcode, rn, rd;
3571 sf = extract32(insn, 31, 1);
3572 rm = extract32(insn, 16, 5);
3573 opcode = extract32(insn, 10, 6);
3574 rn = extract32(insn, 5, 5);
3575 rd = extract32(insn, 0, 5);
3577 if (extract32(insn, 29, 1)) {
3578 unallocated_encoding(s);
3584 handle_div(s, false, sf, rm, rn, rd);
3587 handle_div(s, true, sf, rm, rn, rd);
3590 handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
3593 handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
3596 handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
3599 handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
3608 case 23: /* CRC32 */
3609 unsupported_encoding(s, insn);
3612 unallocated_encoding(s);
3617 /* C3.5 Data processing - register */
3618 static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
3620 switch (extract32(insn, 24, 5)) {
3621 case 0x0a: /* Logical (shifted register) */
3622 disas_logic_reg(s, insn);
3624 case 0x0b: /* Add/subtract */
3625 if (insn & (1 << 21)) { /* (extended register) */
3626 disas_add_sub_ext_reg(s, insn);
3628 disas_add_sub_reg(s, insn);
3631 case 0x1b: /* Data-processing (3 source) */
3632 disas_data_proc_3src(s, insn);
3635 switch (extract32(insn, 21, 3)) {
3636 case 0x0: /* Add/subtract (with carry) */
3637 disas_adc_sbc(s, insn);
3639 case 0x2: /* Conditional compare */
3640 disas_cc(s, insn); /* both imm and reg forms */
3642 case 0x4: /* Conditional select */
3643 disas_cond_select(s, insn);
3645 case 0x6: /* Data-processing */
3646 if (insn & (1 << 30)) { /* (1 source) */
3647 disas_data_proc_1src(s, insn);
3648 } else { /* (2 source) */
3649 disas_data_proc_2src(s, insn);
3653 unallocated_encoding(s);
3658 unallocated_encoding(s);
3663 static void handle_fp_compare(DisasContext *s, bool is_double,
3664 unsigned int rn, unsigned int rm,
3665 bool cmp_with_zero, bool signal_all_nans)
3667 TCGv_i64 tcg_flags = tcg_temp_new_i64();
3668 TCGv_ptr fpst = get_fpstatus_ptr();
3671 TCGv_i64 tcg_vn, tcg_vm;
3673 tcg_vn = read_fp_dreg(s, rn);
3674 if (cmp_with_zero) {
3675 tcg_vm = tcg_const_i64(0);
3677 tcg_vm = read_fp_dreg(s, rm);
3679 if (signal_all_nans) {
3680 gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
3682 gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
3684 tcg_temp_free_i64(tcg_vn);
3685 tcg_temp_free_i64(tcg_vm);
3687 TCGv_i32 tcg_vn, tcg_vm;
3689 tcg_vn = read_fp_sreg(s, rn);
3690 if (cmp_with_zero) {
3691 tcg_vm = tcg_const_i32(0);
3693 tcg_vm = read_fp_sreg(s, rm);
3695 if (signal_all_nans) {
3696 gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
3698 gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
3700 tcg_temp_free_i32(tcg_vn);
3701 tcg_temp_free_i32(tcg_vm);
3704 tcg_temp_free_ptr(fpst);
3706 gen_set_nzcv(tcg_flags);
3708 tcg_temp_free_i64(tcg_flags);
3711 /* C3.6.22 Floating point compare
3712 * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
3713 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
3714 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
3715 * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
3717 static void disas_fp_compare(DisasContext *s, uint32_t insn)
3719 unsigned int mos, type, rm, op, rn, opc, op2r;
3721 mos = extract32(insn, 29, 3);
3722 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
3723 rm = extract32(insn, 16, 5);
3724 op = extract32(insn, 14, 2);
3725 rn = extract32(insn, 5, 5);
3726 opc = extract32(insn, 3, 2);
3727 op2r = extract32(insn, 0, 3);
3729 if (mos || op || op2r || type > 1) {
3730 unallocated_encoding(s);
3734 handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
3737 /* C3.6.23 Floating point conditional compare
3738 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
3739 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
3740 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
3741 * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
3743 static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
3745 unsigned int mos, type, rm, cond, rn, op, nzcv;
3747 int label_continue = -1;
3749 mos = extract32(insn, 29, 3);
3750 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
3751 rm = extract32(insn, 16, 5);
3752 cond = extract32(insn, 12, 4);
3753 rn = extract32(insn, 5, 5);
3754 op = extract32(insn, 4, 1);
3755 nzcv = extract32(insn, 0, 4);
3757 if (mos || type > 1) {
3758 unallocated_encoding(s);
3762 if (cond < 0x0e) { /* not always */
3763 int label_match = gen_new_label();
3764 label_continue = gen_new_label();
3765 arm_gen_test_cc(cond, label_match);
3767 tcg_flags = tcg_const_i64(nzcv << 28);
3768 gen_set_nzcv(tcg_flags);
3769 tcg_temp_free_i64(tcg_flags);
3770 tcg_gen_br(label_continue);
3771 gen_set_label(label_match);
3774 handle_fp_compare(s, type, rn, rm, false, op);
3777 gen_set_label(label_continue);
3781 /* copy src FP register to dst FP register; type specifies single or double */
3782 static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
3785 TCGv_i64 v = read_fp_dreg(s, src);
3786 write_fp_dreg(s, dst, v);
3787 tcg_temp_free_i64(v);
3789 TCGv_i32 v = read_fp_sreg(s, src);
3790 write_fp_sreg(s, dst, v);
3791 tcg_temp_free_i32(v);
3795 /* C3.6.24 Floating point conditional select
3796 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
3797 * +---+---+---+-----------+------+---+------+------+-----+------+------+
3798 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
3799 * +---+---+---+-----------+------+---+------+------+-----+------+------+
3801 static void disas_fp_csel(DisasContext *s, uint32_t insn)
3803 unsigned int mos, type, rm, cond, rn, rd;
3804 int label_continue = -1;
3806 mos = extract32(insn, 29, 3);
3807 type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
3808 rm = extract32(insn, 16, 5);
3809 cond = extract32(insn, 12, 4);
3810 rn = extract32(insn, 5, 5);
3811 rd = extract32(insn, 0, 5);
3813 if (mos || type > 1) {
3814 unallocated_encoding(s);
3818 if (cond < 0x0e) { /* not always */
3819 int label_match = gen_new_label();
3820 label_continue = gen_new_label();
3821 arm_gen_test_cc(cond, label_match);
3823 gen_mov_fp2fp(s, type, rd, rm);
3824 tcg_gen_br(label_continue);
3825 gen_set_label(label_match);
3828 gen_mov_fp2fp(s, type, rd, rn);
3830 if (cond < 0x0e) { /* continue */
3831 gen_set_label(label_continue);
3835 /* C3.6.25 Floating-point data-processing (1 source) - single precision */
3836 static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
3842 fpst = get_fpstatus_ptr();
3843 tcg_op = read_fp_sreg(s, rn);
3844 tcg_res = tcg_temp_new_i32();
3847 case 0x0: /* FMOV */
3848 tcg_gen_mov_i32(tcg_res, tcg_op);
3850 case 0x1: /* FABS */
3851 gen_helper_vfp_abss(tcg_res, tcg_op);
3853 case 0x2: /* FNEG */
3854 gen_helper_vfp_negs(tcg_res, tcg_op);
3856 case 0x3: /* FSQRT */
3857 gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
3859 case 0x8: /* FRINTN */
3860 case 0x9: /* FRINTP */
3861 case 0xa: /* FRINTM */
3862 case 0xb: /* FRINTZ */
3863 case 0xc: /* FRINTA */
3865 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
3867 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3868 gen_helper_rints(tcg_res, tcg_op, fpst);
3870 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3871 tcg_temp_free_i32(tcg_rmode);
3874 case 0xe: /* FRINTX */
3875 gen_helper_rints_exact(tcg_res, tcg_op, fpst);
3877 case 0xf: /* FRINTI */
3878 gen_helper_rints(tcg_res, tcg_op, fpst);
3884 write_fp_sreg(s, rd, tcg_res);
3886 tcg_temp_free_ptr(fpst);
3887 tcg_temp_free_i32(tcg_op);
3888 tcg_temp_free_i32(tcg_res);
3891 /* C3.6.25 Floating-point data-processing (1 source) - double precision */
3892 static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
3898 fpst = get_fpstatus_ptr();
3899 tcg_op = read_fp_dreg(s, rn);
3900 tcg_res = tcg_temp_new_i64();
3903 case 0x0: /* FMOV */
3904 tcg_gen_mov_i64(tcg_res, tcg_op);
3906 case 0x1: /* FABS */
3907 gen_helper_vfp_absd(tcg_res, tcg_op);
3909 case 0x2: /* FNEG */
3910 gen_helper_vfp_negd(tcg_res, tcg_op);
3912 case 0x3: /* FSQRT */
3913 gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
3915 case 0x8: /* FRINTN */
3916 case 0x9: /* FRINTP */
3917 case 0xa: /* FRINTM */
3918 case 0xb: /* FRINTZ */
3919 case 0xc: /* FRINTA */
3921 TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
3923 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3924 gen_helper_rintd(tcg_res, tcg_op, fpst);
3926 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3927 tcg_temp_free_i32(tcg_rmode);
3930 case 0xe: /* FRINTX */
3931 gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
3933 case 0xf: /* FRINTI */
3934 gen_helper_rintd(tcg_res, tcg_op, fpst);
3940 write_fp_dreg(s, rd, tcg_res);
3942 tcg_temp_free_ptr(fpst);
3943 tcg_temp_free_i64(tcg_op);
3944 tcg_temp_free_i64(tcg_res);
3947 static void handle_fp_fcvt(DisasContext *s, int opcode,
3948 int rd, int rn, int dtype, int ntype)
3953 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
3955 /* Single to double */
3956 TCGv_i64 tcg_rd = tcg_temp_new_i64();
3957 gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
3958 write_fp_dreg(s, rd, tcg_rd);
3959 tcg_temp_free_i64(tcg_rd);
3961 /* Single to half */
3962 TCGv_i32 tcg_rd = tcg_temp_new_i32();
3963 gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
3964 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
3965 write_fp_sreg(s, rd, tcg_rd);
3966 tcg_temp_free_i32(tcg_rd);
3968 tcg_temp_free_i32(tcg_rn);
3973 TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
3974 TCGv_i32 tcg_rd = tcg_temp_new_i32();
3976 /* Double to single */
3977 gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
3979 /* Double to half */
3980 gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
3981 /* write_fp_sreg is OK here because top half of tcg_rd is zero */
3983 write_fp_sreg(s, rd, tcg_rd);
3984 tcg_temp_free_i32(tcg_rd);
3985 tcg_temp_free_i64(tcg_rn);
3990 TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
3991 tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
3993 /* Half to single */
3994 TCGv_i32 tcg_rd = tcg_temp_new_i32();
3995 gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
3996 write_fp_sreg(s, rd, tcg_rd);
3997 tcg_temp_free_i32(tcg_rd);
3999 /* Half to double */
4000 TCGv_i64 tcg_rd = tcg_temp_new_i64();
4001 gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
4002 write_fp_dreg(s, rd, tcg_rd);
4003 tcg_temp_free_i64(tcg_rd);
4005 tcg_temp_free_i32(tcg_rn);
4013 /* C3.6.25 Floating point data-processing (1 source)
4014 * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
4015 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4016 * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
4017 * +---+---+---+-----------+------+---+--------+-----------+------+------+
4019 static void disas_fp_1src(DisasContext *s, uint32_t insn)
4021 int type = extract32(insn, 22, 2);
4022 int opcode = extract32(insn, 15, 6);
4023 int rn = extract32(insn, 5, 5);
4024 int rd = extract32(insn, 0, 5);
4027 case 0x4: case 0x5: case 0x7:
4029 /* FCVT between half, single and double precision */
4030 int dtype = extract32(opcode, 0, 2);
4031 if (type == 2 || dtype == type) {
4032 unallocated_encoding(s);
4035 handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
4041 /* 32-to-32 and 64-to-64 ops */
4044 handle_fp_1src_single(s, opcode, rd, rn);
4047 handle_fp_1src_double(s, opcode, rd, rn);
4050 unallocated_encoding(s);
4054 unallocated_encoding(s);
4059 /* C3.6.26 Floating-point data-processing (2 source) - single precision */
4060 static void handle_fp_2src_single(DisasContext *s, int opcode,
4061 int rd, int rn, int rm)
4068 tcg_res = tcg_temp_new_i32();
4069 fpst = get_fpstatus_ptr();
4070 tcg_op1 = read_fp_sreg(s, rn);
4071 tcg_op2 = read_fp_sreg(s, rm);
4074 case 0x0: /* FMUL */
4075 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4077 case 0x1: /* FDIV */
4078 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
4080 case 0x2: /* FADD */
4081 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
4083 case 0x3: /* FSUB */
4084 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
4086 case 0x4: /* FMAX */
4087 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
4089 case 0x5: /* FMIN */
4090 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
4092 case 0x6: /* FMAXNM */
4093 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
4095 case 0x7: /* FMINNM */
4096 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
4098 case 0x8: /* FNMUL */
4099 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
4100 gen_helper_vfp_negs(tcg_res, tcg_res);
4104 write_fp_sreg(s, rd, tcg_res);
4106 tcg_temp_free_ptr(fpst);
4107 tcg_temp_free_i32(tcg_op1);
4108 tcg_temp_free_i32(tcg_op2);
4109 tcg_temp_free_i32(tcg_res);
4112 /* C3.6.26 Floating-point data-processing (2 source) - double precision */
4113 static void handle_fp_2src_double(DisasContext *s, int opcode,
4114 int rd, int rn, int rm)
4121 tcg_res = tcg_temp_new_i64();
4122 fpst = get_fpstatus_ptr();
4123 tcg_op1 = read_fp_dreg(s, rn);
4124 tcg_op2 = read_fp_dreg(s, rm);
4127 case 0x0: /* FMUL */
4128 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4130 case 0x1: /* FDIV */
4131 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
4133 case 0x2: /* FADD */
4134 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
4136 case 0x3: /* FSUB */
4137 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
4139 case 0x4: /* FMAX */
4140 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
4142 case 0x5: /* FMIN */
4143 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
4145 case 0x6: /* FMAXNM */
4146 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4148 case 0x7: /* FMINNM */
4149 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
4151 case 0x8: /* FNMUL */
4152 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
4153 gen_helper_vfp_negd(tcg_res, tcg_res);
4157 write_fp_dreg(s, rd, tcg_res);
4159 tcg_temp_free_ptr(fpst);
4160 tcg_temp_free_i64(tcg_op1);
4161 tcg_temp_free_i64(tcg_op2);
4162 tcg_temp_free_i64(tcg_res);
4165 /* C3.6.26 Floating point data-processing (2 source)
4166 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
4167 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4168 * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
4169 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
4171 static void disas_fp_2src(DisasContext *s, uint32_t insn)
4173 int type = extract32(insn, 22, 2);
4174 int rd = extract32(insn, 0, 5);
4175 int rn = extract32(insn, 5, 5);
4176 int rm = extract32(insn, 16, 5);
4177 int opcode = extract32(insn, 12, 4);
4180 unallocated_encoding(s);
4186 handle_fp_2src_single(s, opcode, rd, rn, rm);
4189 handle_fp_2src_double(s, opcode, rd, rn, rm);
4192 unallocated_encoding(s);
4196 /* C3.6.27 Floating-point data-processing (3 source) - single precision */
4197 static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
4198 int rd, int rn, int rm, int ra)
4200 TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
4201 TCGv_i32 tcg_res = tcg_temp_new_i32();
4202 TCGv_ptr fpst = get_fpstatus_ptr();
4204 tcg_op1 = read_fp_sreg(s, rn);
4205 tcg_op2 = read_fp_sreg(s, rm);
4206 tcg_op3 = read_fp_sreg(s, ra);
4208 /* These are fused multiply-add, and must be done as one
4209 * floating point operation with no rounding between the
4210 * multiplication and addition steps.
4211 * NB that doing the negations here as separate steps is
4212 * correct: an input NaN should come out with its sign bit
4213 * flipped if it is a negated input.
4216 gen_helper_vfp_negs(tcg_op3, tcg_op3);
4220 gen_helper_vfp_negs(tcg_op1, tcg_op1);
4223 gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4225 write_fp_sreg(s, rd, tcg_res);
4227 tcg_temp_free_ptr(fpst);
4228 tcg_temp_free_i32(tcg_op1);
4229 tcg_temp_free_i32(tcg_op2);
4230 tcg_temp_free_i32(tcg_op3);
4231 tcg_temp_free_i32(tcg_res);
4234 /* C3.6.27 Floating-point data-processing (3 source) - double precision */
4235 static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
4236 int rd, int rn, int rm, int ra)
4238 TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
4239 TCGv_i64 tcg_res = tcg_temp_new_i64();
4240 TCGv_ptr fpst = get_fpstatus_ptr();
4242 tcg_op1 = read_fp_dreg(s, rn);
4243 tcg_op2 = read_fp_dreg(s, rm);
4244 tcg_op3 = read_fp_dreg(s, ra);
4246 /* These are fused multiply-add, and must be done as one
4247 * floating point operation with no rounding between the
4248 * multiplication and addition steps.
4249 * NB that doing the negations here as separate steps is
4250 * correct: an input NaN should come out with its sign bit
4251 * flipped if it is a negated input.
4254 gen_helper_vfp_negd(tcg_op3, tcg_op3);
4258 gen_helper_vfp_negd(tcg_op1, tcg_op1);
4261 gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
4263 write_fp_dreg(s, rd, tcg_res);
4265 tcg_temp_free_ptr(fpst);
4266 tcg_temp_free_i64(tcg_op1);
4267 tcg_temp_free_i64(tcg_op2);
4268 tcg_temp_free_i64(tcg_op3);
4269 tcg_temp_free_i64(tcg_res);
4272 /* C3.6.27 Floating point data-processing (3 source)
4273 * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
4274 * +---+---+---+-----------+------+----+------+----+------+------+------+
4275 * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
4276 * +---+---+---+-----------+------+----+------+----+------+------+------+
4278 static void disas_fp_3src(DisasContext *s, uint32_t insn)
4280 int type = extract32(insn, 22, 2);
4281 int rd = extract32(insn, 0, 5);
4282 int rn = extract32(insn, 5, 5);
4283 int ra = extract32(insn, 10, 5);
4284 int rm = extract32(insn, 16, 5);
4285 bool o0 = extract32(insn, 15, 1);
4286 bool o1 = extract32(insn, 21, 1);
4290 handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
4293 handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
4296 unallocated_encoding(s);
4300 /* C3.6.28 Floating point immediate
4301 * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
4302 * +---+---+---+-----------+------+---+------------+-------+------+------+
4303 * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
4304 * +---+---+---+-----------+------+---+------------+-------+------+------+
4306 static void disas_fp_imm(DisasContext *s, uint32_t insn)
4308 int rd = extract32(insn, 0, 5);
4309 int imm8 = extract32(insn, 13, 8);
4310 int is_double = extract32(insn, 22, 2);
4314 if (is_double > 1) {
4315 unallocated_encoding(s);
4319 /* The imm8 encodes the sign bit, enough bits to represent
4320 * an exponent in the range 01....1xx to 10....0xx,
4321 * and the most significant 4 bits of the mantissa; see
4322 * VFPExpandImm() in the v8 ARM ARM.
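 * Worked example (lines added for clarity): imm8 = 0x70 expands to
 * 0x3ff0 << 48 = 0x3ff0000000000000 (double 1.0) or to
 * 0x3f80 << 16 = 0x3f800000 (single 1.0).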
4325 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
4326 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
4327 extract32(imm8, 0, 6);
4330 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
4331 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
4332 (extract32(imm8, 0, 6) << 3);
4336 tcg_res = tcg_const_i64(imm);
4337 write_fp_dreg(s, rd, tcg_res);
4338 tcg_temp_free_i64(tcg_res);
4341 /* Handle floating point <=> fixed point conversions. Note that we can
4342 * also deal with fp <=> integer conversions as a special case (scale == 64)
4343 * OPTME: consider handling that special case specially or at least skipping
4344 * the call to scalbn in the helpers for zero shifts.
4346 static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
4347 bool itof, int rmode, int scale, int sf, int type)
4349 bool is_signed = !(opcode & 1);
4350 bool is_double = type;
4351 TCGv_ptr tcg_fpstatus;
4354 tcg_fpstatus = get_fpstatus_ptr();
4356 tcg_shift = tcg_const_i32(64 - scale);
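/* Added note: tcg_shift is the number of fractional bits (64 - scale);
 * the pure integer conversion case passes scale == 64, i.e. a shift
 * of 0 into the scalbn-based helpers.
 */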
4359 TCGv_i64 tcg_int = cpu_reg(s, rn);
4361 TCGv_i64 tcg_extend = new_tmp_a64(s);
4364 tcg_gen_ext32s_i64(tcg_extend, tcg_int);
4366 tcg_gen_ext32u_i64(tcg_extend, tcg_int);
4369 tcg_int = tcg_extend;
4373 TCGv_i64 tcg_double = tcg_temp_new_i64();
4375 gen_helper_vfp_sqtod(tcg_double, tcg_int,
4376 tcg_shift, tcg_fpstatus);
4378 gen_helper_vfp_uqtod(tcg_double, tcg_int,
4379 tcg_shift, tcg_fpstatus);
4381 write_fp_dreg(s, rd, tcg_double);
4382 tcg_temp_free_i64(tcg_double);
4384 TCGv_i32 tcg_single = tcg_temp_new_i32();
4386 gen_helper_vfp_sqtos(tcg_single, tcg_int,
4387 tcg_shift, tcg_fpstatus);
4389 gen_helper_vfp_uqtos(tcg_single, tcg_int,
4390 tcg_shift, tcg_fpstatus);
4392 write_fp_sreg(s, rd, tcg_single);
4393 tcg_temp_free_i32(tcg_single);
4396 TCGv_i64 tcg_int = cpu_reg(s, rd);
4399 if (extract32(opcode, 2, 1)) {
4400 /* There are too many rounding modes to all fit into rmode,
4401 * so FCVTA[US] is a special case.
4403 rmode = FPROUNDING_TIEAWAY;
4406 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
4408 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4411 TCGv_i64 tcg_double = read_fp_dreg(s, rn);
4414 gen_helper_vfp_tosld(tcg_int, tcg_double,
4415 tcg_shift, tcg_fpstatus);
4417 gen_helper_vfp_tosqd(tcg_int, tcg_double,
4418 tcg_shift, tcg_fpstatus);
4422 gen_helper_vfp_tould(tcg_int, tcg_double,
4423 tcg_shift, tcg_fpstatus);
4425 gen_helper_vfp_touqd(tcg_int, tcg_double,
4426 tcg_shift, tcg_fpstatus);
4429 tcg_temp_free_i64(tcg_double);
4431 TCGv_i32 tcg_single = read_fp_sreg(s, rn);
4434 gen_helper_vfp_tosqs(tcg_int, tcg_single,
4435 tcg_shift, tcg_fpstatus);
4437 gen_helper_vfp_touqs(tcg_int, tcg_single,
4438 tcg_shift, tcg_fpstatus);
4441 TCGv_i32 tcg_dest = tcg_temp_new_i32();
4443 gen_helper_vfp_tosls(tcg_dest, tcg_single,
4444 tcg_shift, tcg_fpstatus);
4446 gen_helper_vfp_touls(tcg_dest, tcg_single,
4447 tcg_shift, tcg_fpstatus);
4449 tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
4450 tcg_temp_free_i32(tcg_dest);
4452 tcg_temp_free_i32(tcg_single);
4455 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
4456 tcg_temp_free_i32(tcg_rmode);
4459 tcg_gen_ext32u_i64(tcg_int, tcg_int);
4463 tcg_temp_free_ptr(tcg_fpstatus);
4464 tcg_temp_free_i32(tcg_shift);
4467 /* C3.6.29 Floating point <-> fixed point conversions
4468 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
4469 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
4470 * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
4471 * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
4473 static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
4475 int rd = extract32(insn, 0, 5);
4476 int rn = extract32(insn, 5, 5);
4477 int scale = extract32(insn, 10, 6);
4478 int opcode = extract32(insn, 16, 3);
4479 int rmode = extract32(insn, 19, 2);
4480 int type = extract32(insn, 22, 2);
4481 bool sbit = extract32(insn, 29, 1);
4482 bool sf = extract32(insn, 31, 1);
4485 if (sbit || (type > 1)
4486 || (!sf && scale < 32)) {
4487 unallocated_encoding(s);
4491 switch ((rmode << 3) | opcode) {
4492 case 0x2: /* SCVTF */
4493 case 0x3: /* UCVTF */
4496 case 0x18: /* FCVTZS */
4497 case 0x19: /* FCVTZU */
4501 unallocated_encoding(s);
4505 handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
4508 static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
4510 /* FMOV: gpr to or from float, double, or top half of quad fp reg,
4511 * without conversion.
4515 TCGv_i64 tcg_rn = cpu_reg(s, rn);
4521 TCGv_i64 tmp = tcg_temp_new_i64();
4522 tcg_gen_ext32u_i64(tmp, tcg_rn);
4523 tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(rd, MO_64));
4524 tcg_gen_movi_i64(tmp, 0);
4525 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
4526 tcg_temp_free_i64(tmp);
4532 TCGv_i64 tmp = tcg_const_i64(0);
4533 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(rd, MO_64));
4534 tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
4535 tcg_temp_free_i64(tmp);
4539 /* 64 bit to top half. */
4540 tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(rd));
4544 TCGv_i64 tcg_rd = cpu_reg(s, rd);
4549 tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_32));
4553 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_64));
4556 /* 64 bits from top half */
4557 tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(rn));
4563 /* C3.6.30 Floating point <-> integer conversions
4564 * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
4565 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
4566 * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
4567 * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
4569 static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
4571 int rd = extract32(insn, 0, 5);
4572 int rn = extract32(insn, 5, 5);
4573 int opcode = extract32(insn, 16, 3);
4574 int rmode = extract32(insn, 19, 2);
4575 int type = extract32(insn, 22, 2);
4576 bool sbit = extract32(insn, 29, 1);
4577 bool sf = extract32(insn, 31, 1);
4580 unallocated_encoding(s);
4586 bool itof = opcode & 1;
4589 unallocated_encoding(s);
4593 switch (sf << 3 | type << 1 | rmode) {
4594 case 0x0: /* 32 bit */
4595 case 0xa: /* 64 bit */
4596 case 0xd: /* 64 bit to top half of quad */
4599 /* all other sf/type/rmode combinations are invalid */
4600 unallocated_encoding(s);
4604 handle_fmov(s, rd, rn, type, itof);
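/* Example (added): sf=1, type=2, rmode=1 is case 0xd above, i.e.
 * FMOV between Xn and the top 64 bits of a quad FP register,
 * with no value conversion.
 */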
4606 /* actual FP conversions */
4607 bool itof = extract32(opcode, 1, 1);
4609 if (type > 1 || (rmode != 0 && opcode > 1)) {
4610 unallocated_encoding(s);
4614 handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
4618 /* FP-specific subcases of table C3-6 (SIMD and FP data processing)
4619 * 31 30 29 28 25 24 0
4620 * +---+---+---+---------+-----------------------------+
4621 * | | 0 | | 1 1 1 1 | |
4622 * +---+---+---+---------+-----------------------------+
4624 static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
4626 if (extract32(insn, 24, 1)) {
4627 /* Floating point data-processing (3 source) */
4628 disas_fp_3src(s, insn);
4629 } else if (extract32(insn, 21, 1) == 0) {
4630 /* Floating point to fixed point conversions */
4631 disas_fp_fixed_conv(s, insn);
4633 switch (extract32(insn, 10, 2)) {
4635 /* Floating point conditional compare */
4636 disas_fp_ccomp(s, insn);
4639 /* Floating point data-processing (2 source) */
4640 disas_fp_2src(s, insn);
4643 /* Floating point conditional select */
4644 disas_fp_csel(s, insn);
4647 switch (ctz32(extract32(insn, 12, 4))) {
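/* Added note: ctz32 of insn[15:12] classifies these encodings by
 * the position of the lowest set bit (see the case comments below);
 * ctz32(0) == 32, which lands in the default case.
 */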
4648 case 0: /* [15:12] == xxx1 */
4649 /* Floating point immediate */
4650 disas_fp_imm(s, insn);
4652 case 1: /* [15:12] == xx10 */
4653 /* Floating point compare */
4654 disas_fp_compare(s, insn);
4656 case 2: /* [15:12] == x100 */
4657 /* Floating point data-processing (1 source) */
4658 disas_fp_1src(s, insn);
4660 case 3: /* [15:12] == 1000 */
4661 unallocated_encoding(s);
4663 default: /* [15:12] == 0000 */
4664 /* Floating point <-> integer conversions */
4665 disas_fp_int_conv(s, insn);
4673 static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
4676 /* Extract 64 bits from the middle of two concatenated 64 bit
4677 * vector register slices left:right. The extracted bits start
4678 * at 'pos' bits into the right (least significant) side.
4679 * We return the result in tcg_right, and guarantee not to
4682 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
4683 assert(pos > 0 && pos < 64);
4685 tcg_gen_shri_i64(tcg_right, tcg_right, pos);
4686 tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
4687 tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
4689 tcg_temp_free_i64(tcg_tmp);
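/* Example (added): with pos == 24 the result in tcg_right is
 * left<23:0> : right<63:24>, i.e. 64 bits taken starting 24 bits
 * into the right-hand element.
 */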
4693 * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
4694 * +---+---+-------------+-----+---+------+---+------+---+------+------+
4695 * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
4696 * +---+---+-------------+-----+---+------+---+------+---+------+------+
4698 static void disas_simd_ext(DisasContext *s, uint32_t insn)
4700 int is_q = extract32(insn, 30, 1);
4701 int op2 = extract32(insn, 22, 2);
4702 int imm4 = extract32(insn, 11, 4);
4703 int rm = extract32(insn, 16, 5);
4704 int rn = extract32(insn, 5, 5);
4705 int rd = extract32(insn, 0, 5);
4706 int pos = imm4 << 3;
4707 TCGv_i64 tcg_resl, tcg_resh;
4709 if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
4710 unallocated_encoding(s);
4714 tcg_resh = tcg_temp_new_i64();
4715 tcg_resl = tcg_temp_new_i64();
4717 /* Vd gets bits starting at pos bits into Vm:Vn. This is
4718 * either extracting 128 bits from a 128:128 concatenation, or
4719 * extracting 64 bits from a 64:64 concatenation.
4722 read_vec_element(s, tcg_resl, rn, 0, MO_64);
4724 read_vec_element(s, tcg_resh, rm, 0, MO_64);
4725 do_ext64(s, tcg_resh, tcg_resl, pos);
4727 tcg_gen_movi_i64(tcg_resh, 0);
4734 EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
4735 EltPosns *elt = eltposns;
4742 read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
4744 read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
4747 do_ext64(s, tcg_resh, tcg_resl, pos);
4748 tcg_hh = tcg_temp_new_i64();
4749 read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
4750 do_ext64(s, tcg_hh, tcg_resh, pos);
4751 tcg_temp_free_i64(tcg_hh);
4755 write_vec_element(s, tcg_resl, rd, 0, MO_64);
4756 tcg_temp_free_i64(tcg_resl);
4757 write_vec_element(s, tcg_resh, rd, 1, MO_64);
4758 tcg_temp_free_i64(tcg_resh);
4762 * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
4763 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
4764 * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
4765 * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
4767 static void disas_simd_tb(DisasContext *s, uint32_t insn)
4769 int op2 = extract32(insn, 22, 2);
4770 int is_q = extract32(insn, 30, 1);
4771 int rm = extract32(insn, 16, 5);
4772 int rn = extract32(insn, 5, 5);
4773 int rd = extract32(insn, 0, 5);
4774 int is_tblx = extract32(insn, 12, 1);
4775 int len = extract32(insn, 13, 2);
4776 TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
4777 TCGv_i32 tcg_regno, tcg_numregs;
4780 unallocated_encoding(s);
4784 /* This does a table lookup: for every byte element in the input
4785 * we index into a table formed from up to four vector registers,
4786 * and then the output is the result of the lookups. Our helper
4787 * function does the lookup operation for a single 64 bit part of
4790 tcg_resl = tcg_temp_new_i64();
4791 tcg_resh = tcg_temp_new_i64();
4794 read_vec_element(s, tcg_resl, rd, 0, MO_64);
4796 tcg_gen_movi_i64(tcg_resl, 0);
4798 if (is_tblx && is_q) {
4799 read_vec_element(s, tcg_resh, rd, 1, MO_64);
4801 tcg_gen_movi_i64(tcg_resh, 0);
4804 tcg_idx = tcg_temp_new_i64();
4805 tcg_regno = tcg_const_i32(rn);
4806 tcg_numregs = tcg_const_i32(len + 1);
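/* With len == 3 this passes numregs == 4: the table is the 64 bytes in
 * V(rn)..V((rn+3) MOD 32). An index byte beyond the table yields 0 for
 * TBL, while TBX leaves the destination byte unchanged (hence Vd was
 * read above in the TBX case).
 */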
4807 read_vec_element(s, tcg_idx, rm, 0, MO_64);
4808 gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
4809 tcg_regno, tcg_numregs);
4811 read_vec_element(s, tcg_idx, rm, 1, MO_64);
4812 gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
4813 tcg_regno, tcg_numregs);
4815 tcg_temp_free_i64(tcg_idx);
4816 tcg_temp_free_i32(tcg_regno);
4817 tcg_temp_free_i32(tcg_numregs);
4819 write_vec_element(s, tcg_resl, rd, 0, MO_64);
4820 tcg_temp_free_i64(tcg_resl);
4821 write_vec_element(s, tcg_resh, rd, 1, MO_64);
4822 tcg_temp_free_i64(tcg_resh);
4825 /* C3.6.3 ZIP/UZP/TRN
4826 * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
4827 * +---+---+-------------+------+---+------+---+------------------+------+
4828 * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
4829 * +---+---+-------------+------+---+------+---+------------------+------+
4831 static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
4833 int rd = extract32(insn, 0, 5);
4834 int rn = extract32(insn, 5, 5);
4835 int rm = extract32(insn, 16, 5);
4836 int size = extract32(insn, 22, 2);
4837 /* opc field bits [1:0] indicate ZIP/UZP/TRN;
4838 * bit 2 indicates 1 vs 2 variant of the insn.
4840 int opcode = extract32(insn, 12, 2);
4841 bool part = extract32(insn, 14, 1);
4842 bool is_q = extract32(insn, 30, 1);
4843 int esize = 8 << size;
4845 int datasize = is_q ? 128 : 64;
4846 int elements = datasize / esize;
4847 TCGv_i64 tcg_res, tcg_resl, tcg_resh;
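/* As a concrete example with byte elements and Q == 1 (16 elements):
 * UZP1 gathers the even-indexed bytes of Vn and then of Vm,
 * TRN1 interleaves the even-indexed bytes of Vn and Vm pairwise, and
 * ZIP1 interleaves the low eight bytes of Vn and Vm.
 * The "2" forms (part == 1) do the same with the odd/high halves.
 */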
4849 if (opcode == 0 || (size == 3 && !is_q)) {
4850 unallocated_encoding(s);
4854 tcg_resl = tcg_const_i64(0);
4855 tcg_resh = tcg_const_i64(0);
4856 tcg_res = tcg_temp_new_i64();
4858 for (i = 0; i < elements; i++) {
4860 case 1: /* UZP1/2 */
4862 int midpoint = elements / 2;
4864 read_vec_element(s, tcg_res, rn, 2 * i + part, size);
4866 read_vec_element(s, tcg_res, rm,
4867 2 * (i - midpoint) + part, size);
4871 case 2: /* TRN1/2 */
4873 read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
4875 read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
4878 case 3: /* ZIP1/2 */
4880 int base = part * elements / 2;
4882 read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
4884 read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
4889 g_assert_not_reached();
4894 tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
4895 tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
4897 tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
4898 tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
4902 tcg_temp_free_i64(tcg_res);
4904 write_vec_element(s, tcg_resl, rd, 0, MO_64);
4905 tcg_temp_free_i64(tcg_resl);
4906 write_vec_element(s, tcg_resh, rd, 1, MO_64);
4907 tcg_temp_free_i64(tcg_resh);
4910 static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
4911 int opc, bool is_min, TCGv_ptr fpst)
4913 /* Helper function for disas_simd_across_lanes: do a single precision
4914 * min/max operation on the specified two inputs,
4915 * and return the result in tcg_elt1.
4919 gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
4921 gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
4926 gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
4928 gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
4933 /* C3.6.4 AdvSIMD across lanes
4934 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
4935 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
4936 * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
4937 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
4939 static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
4941 int rd = extract32(insn, 0, 5);
4942 int rn = extract32(insn, 5, 5);
4943 int size = extract32(insn, 22, 2);
4944 int opcode = extract32(insn, 12, 5);
4945 bool is_q = extract32(insn, 30, 1);
4946 bool is_u = extract32(insn, 29, 1);
4948 bool is_min = false;
4952 TCGv_i64 tcg_res, tcg_elt;
4955 case 0x1b: /* ADDV */
4957 unallocated_encoding(s);
4961 case 0x3: /* SADDLV, UADDLV */
4962 case 0xa: /* SMAXV, UMAXV */
4963 case 0x1a: /* SMINV, UMINV */
4964 if (size == 3 || (size == 2 && !is_q)) {
4965 unallocated_encoding(s);
4969 case 0xc: /* FMAXNMV, FMINNMV */
4970 case 0xf: /* FMAXV, FMINV */
4971 if (!is_u || !is_q || extract32(size, 0, 1)) {
4972 unallocated_encoding(s);
4975 /* Bit 1 of size field encodes min vs max, and actual size is always
4976 * 32 bits: adjust the size variable so following code can rely on it
4978 is_min = extract32(size, 1, 1);
4983 unallocated_encoding(s);
4988 elements = (is_q ? 128 : 64) / esize;
4990 tcg_res = tcg_temp_new_i64();
4991 tcg_elt = tcg_temp_new_i64();
4993 /* These instructions operate across all lanes of a vector
4994 * to produce a single result. We can guarantee that a 64
4995 * bit intermediate is sufficient:
4996 * + for [US]ADDLV the maximum element size is 32 bits, and
4997 * the result type is 64 bits
4998 * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
4999 * same as the element size, which is 32 bits at most
5000 * For the integer operations we can choose to work at 64
5001 * or 32 bits and truncate at the end; for simplicity
5002 * we use 64 bits always. The floating point
5003 * ops do require 32 bit intermediates, though.
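/* (For instance, UADDLV over sixteen byte elements sums to at most
 * 16 * 0xff == 0xff0, comfortably within a 64 bit intermediate.)
 */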
5006 read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
5008 for (i = 1; i < elements; i++) {
5009 read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
5012 case 0x03: /* SADDLV / UADDLV */
5013 case 0x1b: /* ADDV */
5014 tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
5016 case 0x0a: /* SMAXV / UMAXV */
5017 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
5019 tcg_res, tcg_elt, tcg_res, tcg_elt);
5021 case 0x1a: /* SMINV / UMINV */
5022 tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
5024 tcg_res, tcg_elt, tcg_res, tcg_elt);
5028 g_assert_not_reached();
5033 /* Floating point ops which work on 32 bit (single) intermediates.
5034 * Note that correct NaN propagation requires that we do these
5035 * operations in exactly the order specified by the pseudocode.
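/* Concretely, the reduction below is the pairwise tree
 * op(op(elt0, elt1), op(elt2, elt3)), not a linear left-to-right fold.
 */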
5037 TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
5038 TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
5039 TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
5040 TCGv_ptr fpst = get_fpstatus_ptr();
5042 assert(esize == 32);
5043 assert(elements == 4);
5045 read_vec_element(s, tcg_elt, rn, 0, MO_32);
5046 tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
5047 read_vec_element(s, tcg_elt, rn, 1, MO_32);
5048 tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
5050 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5052 read_vec_element(s, tcg_elt, rn, 2, MO_32);
5053 tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
5054 read_vec_element(s, tcg_elt, rn, 3, MO_32);
5055 tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
5057 do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
5059 do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
5061 tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
5062 tcg_temp_free_i32(tcg_elt1);
5063 tcg_temp_free_i32(tcg_elt2);
5064 tcg_temp_free_i32(tcg_elt3);
5065 tcg_temp_free_ptr(fpst);
5068 tcg_temp_free_i64(tcg_elt);
5070 /* Now truncate the result to the width required for the final output */
5071 if (opcode == 0x03) {
5072 /* SADDLV, UADDLV: result is 2*esize */
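/* e.g. UADDLV over 16-bit elements produces a 32-bit total, which the
 * ext32u case below zero-extends into the 64-bit destination.
 */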
5078 tcg_gen_ext8u_i64(tcg_res, tcg_res);
5081 tcg_gen_ext16u_i64(tcg_res, tcg_res);
5084 tcg_gen_ext32u_i64(tcg_res, tcg_res);
5089 g_assert_not_reached();
5092 write_fp_dreg(s, rd, tcg_res);
5093 tcg_temp_free_i64(tcg_res);
5096 /* C6.3.31 DUP (Element, Vector)
5098 * 31 30 29 21 20 16 15 10 9 5 4 0
5099 * +---+---+-------------------+--------+-------------+------+------+
5100 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
5101 * +---+---+-------------------+--------+-------------+------+------+
5103 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5105 static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
5108 int size = ctz32(imm5);
5109 int esize = 8 << size;
5110 int elements = (is_q ? 128 : 64) / esize;
5114 if (size > 3 || (size == 3 && !is_q)) {
5115 unallocated_encoding(s);
5119 index = imm5 >> (size + 1);
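/* e.g. imm5 == 0b01010: lowest set bit is 1, so 16-bit elements,
 * and index == imm5 >> 2 == 2.
 */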
5121 tmp = tcg_temp_new_i64();
5122 read_vec_element(s, tmp, rn, index, size);
5124 for (i = 0; i < elements; i++) {
5125 write_vec_element(s, tmp, rd, i, size);
5129 clear_vec_high(s, rd);
5132 tcg_temp_free_i64(tmp);
5135 /* C6.3.31 DUP (element, scalar)
5136 * 31 21 20 16 15 10 9 5 4 0
5137 * +-----------------------+--------+-------------+------+------+
5138 * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
5139 * +-----------------------+--------+-------------+------+------+
5141 static void handle_simd_dupes(DisasContext *s, int rd, int rn,
5144 int size = ctz32(imm5);
5149 unallocated_encoding(s);
5153 index = imm5 >> (size + 1);
5155 /* This instruction just extracts the specified element and
5156 * zero-extends it into the bottom of the destination register.
5158 tmp = tcg_temp_new_i64();
5159 read_vec_element(s, tmp, rn, index, size);
5160 write_fp_dreg(s, rd, tmp);
5161 tcg_temp_free_i64(tmp);
5164 /* C6.3.32 DUP (General)
5166 * 31 30 29 21 20 16 15 10 9 5 4 0
5167 * +---+---+-------------------+--------+-------------+------+------+
5168 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
5169 * +---+---+-------------------+--------+-------------+------+------+
5171 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5173 static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
5176 int size = ctz32(imm5);
5177 int esize = 8 << size;
5178 int elements = (is_q ? 128 : 64) / esize;
5181 if (size > 3 || ((size == 3) && !is_q)) {
5182 unallocated_encoding(s);
5185 for (i = 0; i < elements; i++) {
5186 write_vec_element(s, cpu_reg(s, rn), rd, i, size);
5189 clear_vec_high(s, rd);
5193 /* C6.3.150 INS (Element)
5195 * 31 21 20 16 15 14 11 10 9 5 4 0
5196 * +-----------------------+--------+------------+---+------+------+
5197 * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
5198 * +-----------------------+--------+------------+---+------+------+
5200 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5201 * index: encoded in imm5<4:size+1>
5203 static void handle_simd_inse(DisasContext *s, int rd, int rn,
5206 int size = ctz32(imm5);
5207 int src_index, dst_index;
5211 unallocated_encoding(s);
5214 dst_index = extract32(imm5, 1+size, 5);
5215 src_index = extract32(imm4, size, 4);
5217 tmp = tcg_temp_new_i64();
5219 read_vec_element(s, tmp, rn, src_index, size);
5220 write_vec_element(s, tmp, rd, dst_index, size);
5222 tcg_temp_free_i64(tmp);
5226 /* C6.3.151 INS (General)
5228 * 31 21 20 16 15 10 9 5 4 0
5229 * +-----------------------+--------+-------------+------+------+
5230 * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
5231 * +-----------------------+--------+-------------+------+------+
5233 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5234 * index: encoded in imm5<4:size+1>
5236 static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
5238 int size = ctz32(imm5);
5242 unallocated_encoding(s);
5246 idx = extract32(imm5, 1 + size, 4 - size);
5247 write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
5251 * C6.3.321 UMOV (General)
5252 * C6.3.237 SMOV (General)
5254 * 31 30 29 21 20 16 15 12 10 9 5 4 0
5255 * +---+---+-------------------+--------+-------------+------+------+
5256 * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
5257 * +---+---+-------------------+--------+-------------+------+------+
5259 * U: unsigned when set
5260 * size: encoded in imm5 (see ARM ARM LowestSetBit())
5262 static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
5263 int rn, int rd, int imm5)
5265 int size = ctz32(imm5);
5269 /* Check for UnallocatedEncodings */
5271 if (size > 2 || (size == 2 && !is_q)) {
5272 unallocated_encoding(s);
5277 || (size < 3 && is_q)
5278 || (size == 3 && !is_q)) {
5279 unallocated_encoding(s);
5283 element = extract32(imm5, 1+size, 4);
5285 tcg_rd = cpu_reg(s, rd);
5286 read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
5287 if (is_signed && !is_q) {
5288 tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
5292 /* C3.6.5 AdvSIMD copy
5293 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
5294 * +---+---+----+-----------------+------+---+------+---+------+------+
5295 * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
5296 * +---+---+----+-----------------+------+---+------+---+------+------+
5298 static void disas_simd_copy(DisasContext *s, uint32_t insn)
5300 int rd = extract32(insn, 0, 5);
5301 int rn = extract32(insn, 5, 5);
5302 int imm4 = extract32(insn, 11, 4);
5303 int op = extract32(insn, 29, 1);
5304 int is_q = extract32(insn, 30, 1);
5305 int imm5 = extract32(insn, 16, 5);
5310 handle_simd_inse(s, rd, rn, imm4, imm5);
5312 unallocated_encoding(s);
5317 /* DUP (element - vector) */
5318 handle_simd_dupe(s, is_q, rd, rn, imm5);
5322 handle_simd_dupg(s, is_q, rd, rn, imm5);
5327 handle_simd_insg(s, rd, rn, imm5);
5329 unallocated_encoding(s);
5334 /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
5335 handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
5338 unallocated_encoding(s);
5344 /* C3.6.6 AdvSIMD modified immediate
5345 * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
5346 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
5347 * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
5348 * +---+---+----+---------------------+-----+-------+----+---+-------+------+
5350 * There are a number of operations that can be carried out here:
5351 * MOVI - move (shifted) imm into register
5352 * MVNI - move inverted (shifted) imm into register
5353 * ORR - bitwise OR of (shifted) imm with register
5354 * BIC - bitwise clear of (shifted) imm with register
5356 static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
5358 int rd = extract32(insn, 0, 5);
5359 int cmode = extract32(insn, 12, 4);
5360 int cmode_3_1 = extract32(cmode, 1, 3);
5361 int cmode_0 = extract32(cmode, 0, 1);
5362 int o2 = extract32(insn, 11, 1);
5363 uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
5364 bool is_neg = extract32(insn, 29, 1);
5365 bool is_q = extract32(insn, 30, 1);
5367 TCGv_i64 tcg_rd, tcg_imm;
5370 if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
5371 unallocated_encoding(s);
5375 /* See AdvSIMDExpandImm() in ARM ARM */
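/* (e.g. cmode_3_1 == 1 with abcdefgh == 0xab replicates the 32-bit
 * pattern 0x0000ab00, giving 0x0000ab000000ab00.)
 */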
5376 switch (cmode_3_1) {
5377 case 0: /* Replicate(Zeros(24):imm8, 2) */
5378 case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
5379 case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
5380 case 3: /* Replicate(imm8:Zeros(24), 2) */
5382 int shift = cmode_3_1 * 8;
5383 imm = bitfield_replicate(abcdefgh << shift, 32);
5386 case 4: /* Replicate(Zeros(8):imm8, 4) */
5387 case 5: /* Replicate(imm8:Zeros(8), 4) */
5389 int shift = (cmode_3_1 & 0x1) * 8;
5390 imm = bitfield_replicate(abcdefgh << shift, 16);
5395 /* Replicate(Zeros(8):imm8:Ones(16), 2) */
5396 imm = (abcdefgh << 16) | 0xffff;
5398 /* Replicate(Zeros(16):imm8:Ones(8), 2) */
5399 imm = (abcdefgh << 8) | 0xff;
5401 imm = bitfield_replicate(imm, 32);
5404 if (!cmode_0 && !is_neg) {
5405 imm = bitfield_replicate(abcdefgh, 8);
5406 } else if (!cmode_0 && is_neg) {
5409 for (i = 0; i < 8; i++) {
5410 if ((abcdefgh) & (1 << i)) {
5411 imm |= 0xffULL << (i * 8);
5414 } else if (cmode_0) {
5416 imm = (abcdefgh & 0x3f) << 48;
5417 if (abcdefgh & 0x80) {
5418 imm |= 0x8000000000000000ULL;
5420 if (abcdefgh & 0x40) {
5421 imm |= 0x3fc0000000000000ULL;
5423 imm |= 0x4000000000000000ULL;
5426 imm = (abcdefgh & 0x3f) << 19;
5427 if (abcdefgh & 0x80) {
5430 if (abcdefgh & 0x40) {
5441 if (cmode_3_1 != 7 && is_neg) {
5445 tcg_imm = tcg_const_i64(imm);
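/* At this point imm holds the fully expanded 64-bit immediate: for the
 * double-precision FMOV case above, abcdefgh == 0x70 expands to
 * 0x3ff0000000000000, i.e. 1.0.
 */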
5446 tcg_rd = new_tmp_a64(s);
5448 for (i = 0; i < 2; i++) {
5449 int foffs = i ? fp_reg_hi_offset(rd) : fp_reg_offset(rd, MO_64);
5451 if (i == 1 && !is_q) {
5452 /* non-quad ops clear high half of vector */
5453 tcg_gen_movi_i64(tcg_rd, 0);
5454 } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
5455 tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
5458 tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
5461 tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
5465 tcg_gen_mov_i64(tcg_rd, tcg_imm);
5467 tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
5470 tcg_temp_free_i64(tcg_imm);
5473 /* C3.6.7 AdvSIMD scalar copy
5474 * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
5475 * +-----+----+-----------------+------+---+------+---+------+------+
5476 * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
5477 * +-----+----+-----------------+------+---+------+---+------+------+
5479 static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
5481 int rd = extract32(insn, 0, 5);
5482 int rn = extract32(insn, 5, 5);
5483 int imm4 = extract32(insn, 11, 4);
5484 int imm5 = extract32(insn, 16, 5);
5485 int op = extract32(insn, 29, 1);
5487 if (op != 0 || imm4 != 0) {
5488 unallocated_encoding(s);
5492 /* DUP (element, scalar) */
5493 handle_simd_dupes(s, rd, rn, imm5);
5496 /* C3.6.8 AdvSIMD scalar pairwise
5497 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
5498 * +-----+---+-----------+------+-----------+--------+-----+------+------+
5499 * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
5500 * +-----+---+-----------+------+-----------+--------+-----+------+------+
5502 static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
5504 unsupported_encoding(s, insn);
5508 * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
5510 * This code handles the common shift logic and is used by both
5511 * the vector and scalar code.
5513 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
5514 TCGv_i64 tcg_rnd, bool accumulate,
5515 bool is_u, int size, int shift)
5517 bool extended_result = false;
5518 bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
5520 TCGv_i64 tcg_src_hi;
5522 if (round && size == 3) {
5523 extended_result = true;
5524 ext_lshift = 64 - shift;
5525 tcg_src_hi = tcg_temp_new_i64();
5526 } else if (shift == 64) {
5527 if (!accumulate && is_u) {
5528 /* result is zero */
5529 tcg_gen_movi_i64(tcg_res, 0);
5534 /* Deal with the rounding step */
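/* Rounding adds the constant 1 << (shift - 1) (passed in via tcg_rnd)
 * before shifting, i.e. res = (src + (1 << (shift - 1))) >> shift.
 */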
5536 if (extended_result) {
5537 TCGv_i64 tcg_zero = tcg_const_i64(0);
5539 /* take care of sign extending tcg_src */
5540 tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
5541 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
5542 tcg_src, tcg_src_hi,
5545 tcg_gen_add2_i64(tcg_src, tcg_src_hi,
5549 tcg_temp_free_i64(tcg_zero);
5551 tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
5555 /* Now do the shift right */
5556 if (round && extended_result) {
5557 /* extended case, >64 bit precision required */
5558 if (ext_lshift == 0) {
5559 /* special case, only high bits matter */
5560 tcg_gen_mov_i64(tcg_src, tcg_src_hi);
5562 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
5563 tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
5564 tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
5569 /* essentially shifting in 64 zeros */
5570 tcg_gen_movi_i64(tcg_src, 0);
5572 tcg_gen_shri_i64(tcg_src, tcg_src, shift);
5576 /* effectively extending the sign-bit */
5577 tcg_gen_sari_i64(tcg_src, tcg_src, 63);
5579 tcg_gen_sari_i64(tcg_src, tcg_src, shift);
5585 tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
5587 tcg_gen_mov_i64(tcg_res, tcg_src);
5590 if (extended_result) {
5591 tcg_temp_free_i64(tcg_src_hi);
5595 /* Common SHL/SLI - Shift left with an optional insert */
5596 static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
5597 bool insert, int shift)
5599 if (insert) { /* SLI */
5600 tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
5602 tcg_gen_shli_i64(tcg_res, tcg_src, shift);
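/* e.g. SLI with shift == 8: the deposit keeps res[7:0] and writes
 * src[55:0] into res[63:8]; plain SHL simply discards the old value.
 */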
5606 /* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
5607 static void handle_scalar_simd_shri(DisasContext *s,
5608 bool is_u, int immh, int immb,
5609 int opcode, int rn, int rd)
5612 int immhb = immh << 3 | immb;
5613 int shift = 2 * (8 << size) - immhb;
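/* immh:immb encodes the shift amount as (2 * esize) - shift; e.g.
 * immh == 0b1000, immb == 0b111 gives immhb == 71 and a shift of 57.
 */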
5614 bool accumulate = false;
5620 if (!extract32(immh, 3, 1)) {
5621 unallocated_encoding(s);
5626 case 0x02: /* SSRA / USRA (accumulate) */
5629 case 0x04: /* SRSHR / URSHR (rounding) */
5632 case 0x06: /* SRSRA / URSRA (accum + rounding) */
5633 accumulate = round = true;
5638 uint64_t round_const = 1ULL << (shift - 1);
5639 tcg_round = tcg_const_i64(round_const);
5641 TCGV_UNUSED_I64(tcg_round);
5644 tcg_rn = read_fp_dreg(s, rn);
5645 tcg_rd = accumulate ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
5647 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
5648 accumulate, is_u, size, shift);
5650 write_fp_dreg(s, rd, tcg_rd);
5652 tcg_temp_free_i64(tcg_rn);
5653 tcg_temp_free_i64(tcg_rd);
5655 tcg_temp_free_i64(tcg_round);
5659 /* SHL/SLI - Scalar shift left */
5660 static void handle_scalar_simd_shli(DisasContext *s, bool insert,
5661 int immh, int immb, int opcode,
5664 int size = 32 - clz32(immh) - 1;
5665 int immhb = immh << 3 | immb;
5666 int shift = immhb - (8 << size);
5667 TCGv_i64 tcg_rn;
5668 TCGv_i64 tcg_rd;
5670 if (!extract32(immh, 3, 1)) {
5671 unallocated_encoding(s);
5675 tcg_rn = read_fp_dreg(s, rn);
5676 tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
5678 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
5680 write_fp_dreg(s, rd, tcg_rd);
5682 tcg_temp_free_i64(tcg_rn);
5683 tcg_temp_free_i64(tcg_rd);
5686 /* C3.6.9 AdvSIMD scalar shift by immediate
5687 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
5688 * +-----+---+-------------+------+------+--------+---+------+------+
5689 * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
5690 * +-----+---+-------------+------+------+--------+---+------+------+
5692 * This is the scalar version, so it operates on fixed-size registers.
5694 static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
5696 int rd = extract32(insn, 0, 5);
5697 int rn = extract32(insn, 5, 5);
5698 int opcode = extract32(insn, 11, 5);
5699 int immb = extract32(insn, 16, 3);
5700 int immh = extract32(insn, 19, 4);
5701 bool is_u = extract32(insn, 29, 1);
5704 case 0x00: /* SSHR / USHR */
5705 case 0x02: /* SSRA / USRA */
5706 case 0x04: /* SRSHR / URSHR */
5707 case 0x06: /* SRSRA / URSRA */
5708 handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
5710 case 0x0a: /* SHL / SLI */
5711 handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
5714 unsupported_encoding(s, insn);
5719 /* C3.6.10 AdvSIMD scalar three different
5720 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
5721 * +-----+---+-----------+------+---+------+--------+-----+------+------+
5722 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
5723 * +-----+---+-----------+------+---+------+--------+-----+------+------+
5725 static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
5727 unsupported_encoding(s, insn);
5730 static void handle_3same_64(DisasContext *s, int opcode, bool u,
5731 TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
5733 /* Handle 64x64->64 opcodes which are shared between the scalar
5734 * and vector 3-same groups. We cover every opcode where size == 3
5735 * is valid in either the three-reg-same (integer, not pairwise)
5736 * or scalar-three-reg-same groups. (Some opcodes are not yet
5742 case 0x1: /* SQADD */
5744 gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5746 gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5749 case 0x5: /* SQSUB */
5751 gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5753 gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5756 case 0x6: /* CMGT, CMHI */
5757 /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
5758 * We implement this using setcond (test) and then negating.
5760 cond = u ? TCG_COND_GTU : TCG_COND_GT;
5762 tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
5763 tcg_gen_neg_i64(tcg_rd, tcg_rd);
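/* setcond yields 0 or 1; negating that gives the required
 * all-zeroes or all-ones result.
 */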
5765 case 0x7: /* CMGE, CMHS */
5766 cond = u ? TCG_COND_GEU : TCG_COND_GE;
5768 case 0x11: /* CMTST, CMEQ */
5773 /* CMTST : test is "if ((X & Y) != 0)". */
5774 tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
5775 tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
5776 tcg_gen_neg_i64(tcg_rd, tcg_rd);
5778 case 0x8: /* SSHL, USHL */
5780 gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
5782 gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
5785 case 0x9: /* SQSHL, UQSHL */
5787 gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5789 gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5792 case 0xa: /* SRSHL, URSHL */
5794 gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
5796 gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
5799 case 0xb: /* SQRSHL, UQRSHL */
5801 gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5803 gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
5806 case 0x10: /* ADD, SUB */
5808 tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
5810 tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
5814 g_assert_not_reached();
5818 /* Handle the 3-same-operands float operations; shared by the scalar
5819 * and vector encodings. The caller must filter out any encodings
5820 * not allocated for the encoding it is dealing with.
5822 static void handle_3same_float(DisasContext *s, int size, int elements,
5823 int fpopcode, int rd, int rn, int rm)
5826 TCGv_ptr fpst = get_fpstatus_ptr();
5828 for (pass = 0; pass < elements; pass++) {
5831 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
5832 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
5833 TCGv_i64 tcg_res = tcg_temp_new_i64();
5835 read_vec_element(s, tcg_op1, rn, pass, MO_64);
5836 read_vec_element(s, tcg_op2, rm, pass, MO_64);
5839 case 0x18: /* FMAXNM */
5840 gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5842 case 0x1a: /* FADD */
5843 gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
5845 case 0x1e: /* FMAX */
5846 gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
5848 case 0x38: /* FMINNM */
5849 gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
5851 case 0x3a: /* FSUB */
5852 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
5854 case 0x3e: /* FMIN */
5855 gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
5857 case 0x5b: /* FMUL */
5858 gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
5860 case 0x5f: /* FDIV */
5861 gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
5863 case 0x7a: /* FABD */
5864 gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
5865 gen_helper_vfp_absd(tcg_res, tcg_res);
5868 g_assert_not_reached();
5871 write_vec_element(s, tcg_res, rd, pass, MO_64);
5873 tcg_temp_free_i64(tcg_res);
5874 tcg_temp_free_i64(tcg_op1);
5875 tcg_temp_free_i64(tcg_op2);
5878 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
5879 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
5880 TCGv_i32 tcg_res = tcg_temp_new_i32();
5882 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
5883 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
5886 case 0x1a: /* FADD */
5887 gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
5889 case 0x1e: /* FMAX */
5890 gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
5892 case 0x18: /* FMAXNM */
5893 gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
5895 case 0x38: /* FMINNM */
5896 gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
5898 case 0x3a: /* FSUB */
5899 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
5901 case 0x3e: /* FMIN */
5902 gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
5904 case 0x5b: /* FMUL */
5905 gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
5907 case 0x5f: /* FDIV */
5908 gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
5910 case 0x7a: /* FABD */
5911 gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
5912 gen_helper_vfp_abss(tcg_res, tcg_res);
5915 g_assert_not_reached();
5918 if (elements == 1) {
5919 /* scalar single so clear high part */
5920 TCGv_i64 tcg_tmp = tcg_temp_new_i64();
5922 tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
5923 write_vec_element(s, tcg_tmp, rd, pass, MO_64);
5924 tcg_temp_free_i64(tcg_tmp);
5926 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
5929 tcg_temp_free_i32(tcg_res);
5930 tcg_temp_free_i32(tcg_op1);
5931 tcg_temp_free_i32(tcg_op2);
5935 tcg_temp_free_ptr(fpst);
5937 if ((elements << size) < 4) {
5938 /* scalar, or non-quad vector op */
5939 clear_vec_high(s, rd);
5943 /* C3.6.11 AdvSIMD scalar three same
5944 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
5945 * +-----+---+-----------+------+---+------+--------+---+------+------+
5946 * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
5947 * +-----+---+-----------+------+---+------+--------+---+------+------+
5949 static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
5951 int rd = extract32(insn, 0, 5);
5952 int rn = extract32(insn, 5, 5);
5953 int opcode = extract32(insn, 11, 5);
5954 int rm = extract32(insn, 16, 5);
5955 int size = extract32(insn, 22, 2);
5956 bool u = extract32(insn, 29, 1);
5961 if (opcode >= 0x18) {
5962 /* Floating point: U, size[1] and opcode indicate operation */
5963 int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
5965 case 0x1b: /* FMULX */
5966 case 0x1c: /* FCMEQ */
5967 case 0x1f: /* FRECPS */
5968 case 0x3f: /* FRSQRTS */
5969 case 0x5c: /* FCMGE */
5970 case 0x5d: /* FACGE */
5971 case 0x7c: /* FCMGT */
5972 case 0x7d: /* FACGT */
5973 unsupported_encoding(s, insn);
5975 case 0x7a: /* FABD */
5978 unallocated_encoding(s);
5982 handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
5987 case 0x1: /* SQADD, UQADD */
5988 case 0x5: /* SQSUB, UQSUB */
5989 unsupported_encoding(s, insn);
5991 case 0x8: /* SSHL, USHL */
5992 case 0xa: /* SRSHL, URSHL */
5993 case 0x6: /* CMGT, CMHI */
5994 case 0x7: /* CMGE, CMHS */
5995 case 0x11: /* CMTST, CMEQ */
5996 case 0x10: /* ADD, SUB (vector) */
5998 unallocated_encoding(s);
6002 case 0x9: /* SQSHL, UQSHL */
6003 case 0xb: /* SQRSHL, UQRSHL */
6004 unsupported_encoding(s, insn);
6006 case 0x16: /* SQDMULH, SQRDMULH (vector) */
6007 if (size != 1 && size != 2) {
6008 unallocated_encoding(s);
6011 unsupported_encoding(s, insn);
6014 unallocated_encoding(s);
6018 tcg_rn = read_fp_dreg(s, rn); /* op1 */
6019 tcg_rm = read_fp_dreg(s, rm); /* op2 */
6020 tcg_rd = tcg_temp_new_i64();
6022 /* For the moment we only support the opcodes that are
6023 * valid only at 64-bit width. The size != 3 cases will
6024 * be handled later when the relevant ops are implemented.
6026 handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
6028 write_fp_dreg(s, rd, tcg_rd);
6030 tcg_temp_free_i64(tcg_rn);
6031 tcg_temp_free_i64(tcg_rm);
6032 tcg_temp_free_i64(tcg_rd);
6035 /* C3.6.12 AdvSIMD scalar two reg misc
6036 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6037 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6038 * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
6039 * +-----+---+-----------+------+-----------+--------+-----+------+------+
6041 static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
6043 unsupported_encoding(s, insn);
6046 /* C3.6.13 AdvSIMD scalar x indexed element
6047 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
6048 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
6049 * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
6050 * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
6052 static void disas_simd_scalar_indexed(DisasContext *s, uint32_t insn)
6054 unsupported_encoding(s, insn);
6057 /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
6058 static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
6059 int immh, int immb, int opcode, int rn, int rd)
6061 int size = 32 - clz32(immh) - 1;
6062 int immhb = immh << 3 | immb;
6063 int shift = 2 * (8 << size) - immhb;
6064 bool accumulate = false;
6066 int dsize = is_q ? 128 : 64;
6067 int esize = 8 << size;
6068 int elements = dsize / esize;
6069 TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
6070 TCGv_i64 tcg_rn = new_tmp_a64(s);
6071 TCGv_i64 tcg_rd = new_tmp_a64(s);
6075 if (extract32(immh, 3, 1) && !is_q) {
6076 unallocated_encoding(s);
6080 if (size > 3 && !is_q) {
6081 unallocated_encoding(s);
6086 case 0x02: /* SSRA / USRA (accumulate) */
6089 case 0x04: /* SRSHR / URSHR (rounding) */
6092 case 0x06: /* SRSRA / URSRA (accum + rounding) */
6093 accumulate = round = true;
6098 uint64_t round_const = 1ULL << (shift - 1);
6099 tcg_round = tcg_const_i64(round_const);
6101 TCGV_UNUSED_I64(tcg_round);
6104 for (i = 0; i < elements; i++) {
6105 read_vec_element(s, tcg_rn, rn, i, memop);
6107 read_vec_element(s, tcg_rd, rd, i, memop);
6110 handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
6111 accumulate, is_u, size, shift);
6113 write_vec_element(s, tcg_rd, rd, i, size);
6117 clear_vec_high(s, rd);
6121 tcg_temp_free_i64(tcg_round);
6125 /* SHL/SLI - Vector shift left */
6126 static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
6127 int immh, int immb, int opcode, int rn, int rd)
6129 int size = 32 - clz32(immh) - 1;
6130 int immhb = immh << 3 | immb;
6131 int shift = immhb - (8 << size);
6132 int dsize = is_q ? 128 : 64;
6133 int esize = 8 << size;
6134 int elements = dsize / esize;
6135 TCGv_i64 tcg_rn = new_tmp_a64(s);
6136 TCGv_i64 tcg_rd = new_tmp_a64(s);
6139 if (extract32(immh, 3, 1) && !is_q) {
6140 unallocated_encoding(s);
6144 if (size > 3 && !is_q) {
6145 unallocated_encoding(s);
6149 for (i = 0; i < elements; i++) {
6150 read_vec_element(s, tcg_rn, rn, i, size);
6152 read_vec_element(s, tcg_rd, rd, i, size);
6155 handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
6157 write_vec_element(s, tcg_rd, rd, i, size);
6161 clear_vec_high(s, rd);
6165 /* USHLL/SHLL - Vector shift left with widening */
6166 static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
6167 int immh, int immb, int opcode, int rn, int rd)
6169 int size = 32 - clz32(immh) - 1;
6170 int immhb = immh << 3 | immb;
6171 int shift = immhb - (8 << size);
6173 int esize = 8 << size;
6174 int elements = dsize / esize;
6175 TCGv_i64 tcg_rn = new_tmp_a64(s);
6176 TCGv_i64 tcg_rd = new_tmp_a64(s);
6180 unallocated_encoding(s);
6184 /* For the LL variants the store is larger than the load,
6185 * so if rd == rn we would overwrite parts of our input.
6186 * So load everything right now and use shifts in the main loop.
6188 read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
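/* e.g. SSHLL2 V0.8H, V0.16B, #1: all eight source bytes are fetched
 * here before the first 16-bit result can overwrite them.
 */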
6190 for (i = 0; i < elements; i++) {
6191 tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
6192 ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
6193 tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
6194 write_vec_element(s, tcg_rd, rd, i, size + 1);
6199 /* C3.6.14 AdvSIMD shift by immediate
6200 * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
6201 * +---+---+---+-------------+------+------+--------+---+------+------+
6202 * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
6203 * +---+---+---+-------------+------+------+--------+---+------+------+
6205 static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
6207 int rd = extract32(insn, 0, 5);
6208 int rn = extract32(insn, 5, 5);
6209 int opcode = extract32(insn, 11, 5);
6210 int immb = extract32(insn, 16, 3);
6211 int immh = extract32(insn, 19, 4);
6212 bool is_u = extract32(insn, 29, 1);
6213 bool is_q = extract32(insn, 30, 1);
6216 case 0x00: /* SSHR / USHR */
6217 case 0x02: /* SSRA / USRA (accumulate) */
6218 case 0x04: /* SRSHR / URSHR (rounding) */
6219 case 0x06: /* SRSRA / URSRA (accum + rounding) */
6220 handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
6222 case 0x0a: /* SHL / SLI */
6223 handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
6225 case 0x14: /* SSHLL / USHLL */
6226 handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
6229 /* We don't currently implement any of the narrowing or saturating shifts;
6230 * nor do we implement the fixed-point conversions in this
6231 * encoding group (SCVTF, FCVTZS, UCVTF, FCVTZU).
6233 unsupported_encoding(s, insn);
6238 static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
6239 int opcode, int rd, int rn, int rm)
6241 /* 3-reg-different widening insns: 64 x 64 -> 128 */
6242 TCGv_i64 tcg_res[2];
6245 tcg_res[0] = tcg_temp_new_i64();
6246 tcg_res[1] = tcg_temp_new_i64();
6248 /* Does this op do an adding accumulate, a subtracting accumulate,
6249 * or no accumulate at all?
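/* (Assumed from the accop tests below: +1 for the SABAL/UABAL and
 * SMLAL/UMLAL forms, -1 for SMLSL/UMLSL, 0 for the non-accumulating ops.)
 */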
6267 read_vec_element(s, tcg_res[0], rd, 0, MO_64);
6268 read_vec_element(s, tcg_res[1], rd, 1, MO_64);
6271 /* size == 2 means two 32x32->64 operations; this is worth special
6272 * casing because we can generally handle it inline.
6275 for (pass = 0; pass < 2; pass++) {
6276 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6277 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6278 TCGv_i64 tcg_passres;
6279 TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
6281 int elt = pass + is_q * 2;
6283 read_vec_element(s, tcg_op1, rn, elt, memop);
6284 read_vec_element(s, tcg_op2, rm, elt, memop);
6287 tcg_passres = tcg_res[pass];
6289 tcg_passres = tcg_temp_new_i64();
6293 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
6294 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
6296 TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
6297 TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
6299 tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
6300 tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
6301 tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
6303 tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
6304 tcg_temp_free_i64(tcg_tmp1);
6305 tcg_temp_free_i64(tcg_tmp2);
6308 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
6309 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
6310 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
6311 tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
6314 g_assert_not_reached();
6318 tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
6319 tcg_temp_free_i64(tcg_passres);
6320 } else if (accop < 0) {
6321 tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
6322 tcg_temp_free_i64(tcg_passres);
6325 tcg_temp_free_i64(tcg_op1);
6326 tcg_temp_free_i64(tcg_op2);
6329 /* size 0 or 1, generally helper functions */
6330 for (pass = 0; pass < 2; pass++) {
6331 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6332 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6333 TCGv_i64 tcg_passres;
6334 int elt = pass + is_q * 2;
6336 read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
6337 read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
6340 tcg_passres = tcg_res[pass];
6342 tcg_passres = tcg_temp_new_i64();
6346 case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
6347 case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
6350 gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
6352 gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
6356 gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
6358 gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
6362 case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
6363 case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
6364 case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
6367 gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
6369 gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
6373 gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
6375 gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
6380 g_assert_not_reached();
6382 tcg_temp_free_i32(tcg_op1);
6383 tcg_temp_free_i32(tcg_op2);
6387 gen_helper_neon_addl_u16(tcg_res[pass], tcg_res[pass],
6390 gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
6393 tcg_temp_free_i64(tcg_passres);
6394 } else if (accop < 0) {
6396 gen_helper_neon_subl_u16(tcg_res[pass], tcg_res[pass],
6399 gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
6402 tcg_temp_free_i64(tcg_passres);
6407 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
6408 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
6409 tcg_temp_free_i64(tcg_res[0]);
6410 tcg_temp_free_i64(tcg_res[1]);
6413 /* C3.6.15 AdvSIMD three different
6414 * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
6415 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6416 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
6417 * +---+---+---+-----------+------+---+------+--------+-----+------+------+
6419 static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
6421 /* Instructions in this group fall into three basic classes
6422 * (in each case with the operation working on each element in
6423 * the input vectors):
6424 * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
6426 * (2) wide 64 x 128 -> 128
6427 * (3) narrowing 128 x 128 -> 64
6428 * Here we do initial decode, catch unallocated cases and
6429 * dispatch to separate functions for each class.
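/* For example, at size == 2: SMULL (class 1) multiplies 32-bit lanes
 * into 64-bit results; SADDW (class 2) adds 32-bit lanes to 64-bit
 * lanes; ADDHN (class 3) keeps the high 32 bits of each 64-bit sum.
 */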
6431 int is_q = extract32(insn, 30, 1);
6432 int is_u = extract32(insn, 29, 1);
6433 int size = extract32(insn, 22, 2);
6434 int opcode = extract32(insn, 12, 4);
6435 int rm = extract32(insn, 16, 5);
6436 int rn = extract32(insn, 5, 5);
6437 int rd = extract32(insn, 0, 5);
6440 case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
6441 case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
6442 /* 64 x 128 -> 128 */
6443 unsupported_encoding(s, insn);
6445 case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
6446 case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
6447 /* 128 x 128 -> 64 */
6448 unsupported_encoding(s, insn);
6455 unallocated_encoding(s);
6461 unsupported_encoding(s, insn);
6468 /* 64 x 64 -> 128 */
6470 unallocated_encoding(s);
6473 handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
6476 /* opcode 15 not allocated */
6477 unallocated_encoding(s);
6482 /* Logic op (opcode == 3) subgroup of C3.6.16. */
6483 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
6485 int rd = extract32(insn, 0, 5);
6486 int rn = extract32(insn, 5, 5);
6487 int rm = extract32(insn, 16, 5);
6488 int size = extract32(insn, 22, 2);
6489 bool is_u = extract32(insn, 29, 1);
6490 bool is_q = extract32(insn, 30, 1);
6491 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6492 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6493 TCGv_i64 tcg_res[2];
6496 tcg_res[0] = tcg_temp_new_i64();
6497 tcg_res[1] = tcg_temp_new_i64();
6499 for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
6500 read_vec_element(s, tcg_op1, rn, pass, MO_64);
6501 read_vec_element(s, tcg_op2, rm, pass, MO_64);
6506 tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
6509 tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
6512 tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
6515 tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
6520 /* B* ops need res loaded to operate on */
6521 read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
6526 tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
6528 case 1: /* BSL bitwise select */
6529 tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
6530 tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
6531 tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
6533 case 2: /* BIT, bitwise insert if true */
6534 tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
6535 tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
6536 tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
6538 case 3: /* BIF, bitwise insert if false */
6539 tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
6540 tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
6541 tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
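/* All three insertion ops are the same dst ^ ((dst ^ src) & mask)
 * trick: BSL takes the mask from the old Vd, BIT from Vm, BIF from ~Vm.
 */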
6547 write_vec_element(s, tcg_res[0], rd, 0, MO_64);
6549 tcg_gen_movi_i64(tcg_res[1], 0);
6551 write_vec_element(s, tcg_res[1], rd, 1, MO_64);
6553 tcg_temp_free_i64(tcg_op1);
6554 tcg_temp_free_i64(tcg_op2);
6555 tcg_temp_free_i64(tcg_res[0]);
6556 tcg_temp_free_i64(tcg_res[1]);
6559 /* Pairwise op subgroup of C3.6.16. */
6560 static void disas_simd_3same_pair(DisasContext *s, uint32_t insn)
6562 unsupported_encoding(s, insn);
6565 /* Floating point op subgroup of C3.6.16. */
6566 static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
6568 /* For floating point ops, the U, size[1] and opcode bits
6569 * together indicate the operation. size[0] indicates single
6572 int fpopcode = extract32(insn, 11, 5)
6573 | (extract32(insn, 23, 1) << 5)
6574 | (extract32(insn, 29, 1) << 6);
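/* e.g. FABD is U == 1, size[1] == 1, opcode == 0x1a,
 * giving fpopcode 0x7a as used in the switch below.
 */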
6575 int is_q = extract32(insn, 30, 1);
6576 int size = extract32(insn, 22, 1);
6577 int rm = extract32(insn, 16, 5);
6578 int rn = extract32(insn, 5, 5);
6579 int rd = extract32(insn, 0, 5);
6581 int datasize = is_q ? 128 : 64;
6582 int esize = 32 << size;
6583 int elements = datasize / esize;
6585 if (size == 1 && !is_q) {
6586 unallocated_encoding(s);
6591 case 0x58: /* FMAXNMP */
6592 case 0x5a: /* FADDP */
6593 case 0x5e: /* FMAXP */
6594 case 0x78: /* FMINNMP */
6595 case 0x7e: /* FMINP */
6597 unsupported_encoding(s, insn);
6599 case 0x1b: /* FMULX */
6600 case 0x1c: /* FCMEQ */
6601 case 0x1f: /* FRECPS */
6602 case 0x3f: /* FRSQRTS */
6603 case 0x5c: /* FCMGE */
6604 case 0x5d: /* FACGE */
6605 case 0x7c: /* FCMGT */
6606 case 0x7d: /* FACGT */
6607 case 0x19: /* FMLA */
6608 case 0x39: /* FMLS */
6609 unsupported_encoding(s, insn);
6611 case 0x18: /* FMAXNM */
6612 case 0x1a: /* FADD */
6613 case 0x1e: /* FMAX */
6614 case 0x38: /* FMINNM */
6615 case 0x3a: /* FSUB */
6616 case 0x3e: /* FMIN */
6617 case 0x5b: /* FMUL */
6618 case 0x5f: /* FDIV */
6619 case 0x7a: /* FABD */
6620 handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
6623 unallocated_encoding(s);
6628 /* Integer op subgroup of C3.6.16. */
6629 static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
6631 int is_q = extract32(insn, 30, 1);
6632 int u = extract32(insn, 29, 1);
6633 int size = extract32(insn, 22, 2);
6634 int opcode = extract32(insn, 11, 5);
6635 int rm = extract32(insn, 16, 5);
6636 int rn = extract32(insn, 5, 5);
6637 int rd = extract32(insn, 0, 5);
6641 case 0x13: /* MUL, PMUL */
6642 if (u && size != 0) {
6643 unallocated_encoding(s);
6647 case 0x0: /* SHADD, UHADD */
6648 case 0x2: /* SRHADD, URHADD */
6649 case 0x4: /* SHSUB, UHSUB */
6650 case 0xc: /* SMAX, UMAX */
6651 case 0xd: /* SMIN, UMIN */
6652 case 0xe: /* SABD, UABD */
6653 case 0xf: /* SABA, UABA */
6654 case 0x12: /* MLA, MLS */
6656 unallocated_encoding(s);
6659 unsupported_encoding(s, insn);
6661 case 0x16: /* SQDMULH, SQRDMULH */
6662 if (size == 0 || size == 3) {
6663 unallocated_encoding(s);
6666 unsupported_encoding(s, insn);
6669 if (size == 3 && !is_q) {
6670 unallocated_encoding(s);
6677 for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
6678 TCGv_i64 tcg_op1 = tcg_temp_new_i64();
6679 TCGv_i64 tcg_op2 = tcg_temp_new_i64();
6680 TCGv_i64 tcg_res = tcg_temp_new_i64();
6682 read_vec_element(s, tcg_op1, rn, pass, MO_64);
6683 read_vec_element(s, tcg_op2, rm, pass, MO_64);
6685 handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
6687 write_vec_element(s, tcg_res, rd, pass, MO_64);
6689 tcg_temp_free_i64(tcg_res);
6690 tcg_temp_free_i64(tcg_op1);
6691 tcg_temp_free_i64(tcg_op2);
6694 for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
6695 TCGv_i32 tcg_op1 = tcg_temp_new_i32();
6696 TCGv_i32 tcg_op2 = tcg_temp_new_i32();
6697 TCGv_i32 tcg_res = tcg_temp_new_i32();
6698 NeonGenTwoOpFn *genfn = NULL;
6699 NeonGenTwoOpEnvFn *genenvfn = NULL;
6701 read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
6702 read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
6705 case 0x1: /* SQADD, UQADD */
6707 static NeonGenTwoOpEnvFn * const fns[3][2] = {
6708 { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
6709 { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
6710 { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
6712 genenvfn = fns[size][u];
6715 case 0x5: /* SQSUB, UQSUB */
6717 static NeonGenTwoOpEnvFn * const fns[3][2] = {
6718 { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
6719 { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
6720 { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
6722 genenvfn = fns[size][u];
6725 case 0x6: /* CMGT, CMHI */
6727 static NeonGenTwoOpFn * const fns[3][2] = {
6728 { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
6729 { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
6730 { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
6732 genfn = fns[size][u];
6735 case 0x7: /* CMGE, CMHS */
6737 static NeonGenTwoOpFn * const fns[3][2] = {
6738 { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
6739 { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
6740 { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
6742 genfn = fns[size][u];
6745 case 0x8: /* SSHL, USHL */
6747 static NeonGenTwoOpFn * const fns[3][2] = {
6748 { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
6749 { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
6750 { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
6752 genfn = fns[size][u];
6755 case 0x9: /* SQSHL, UQSHL */
6757 static NeonGenTwoOpEnvFn * const fns[3][2] = {
6758 { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
6759 { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
6760 { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
6762 genenvfn = fns[size][u];
6765 case 0xa: /* SRSHL, URSHL */
6767 static NeonGenTwoOpFn * const fns[3][2] = {
6768 { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
6769 { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
6770 { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
6772 genfn = fns[size][u];
6775 case 0xb: /* SQRSHL, UQRSHL */
6777 static NeonGenTwoOpEnvFn * const fns[3][2] = {
6778 { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
6779 { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
6780 { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
6782 genenvfn = fns[size][u];
6785 case 0x10: /* ADD, SUB */
6787 static NeonGenTwoOpFn * const fns[3][2] = {
6788 { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
6789 { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
6790 { tcg_gen_add_i32, tcg_gen_sub_i32 },
6792 genfn = fns[size][u];
6795 case 0x11: /* CMTST, CMEQ */
6797 static NeonGenTwoOpFn * const fns[3][2] = {
6798 { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
6799 { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
6800 { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
6802 genfn = fns[size][u];
6806 g_assert_not_reached();
6810 genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
6812 genfn(tcg_res, tcg_op1, tcg_op2);
6815 write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
6817 tcg_temp_free_i32(tcg_res);
6818 tcg_temp_free_i32(tcg_op1);
6819 tcg_temp_free_i32(tcg_op2);
6824 clear_vec_high(s, rd);
6828 /* C3.6.16 AdvSIMD three same
6829 * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
6830 * +---+---+---+-----------+------+---+------+--------+---+------+------+
6831 * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
6832 * +---+---+---+-----------+------+---+------+--------+---+------+------+
6834 static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
6836 int opcode = extract32(insn, 11, 5);
6839 case 0x3: /* logic ops */
6840 disas_simd_3same_logic(s, insn);
6842 case 0x17: /* ADDP */
6843 case 0x14: /* SMAXP, UMAXP */
6844 case 0x15: /* SMINP, UMINP */
6845 /* Pairwise operations */
6846 disas_simd_3same_pair(s, insn);
6849 /* floating point ops, sz[1] and U are part of opcode */
6850 disas_simd_3same_float(s, insn);
6853 disas_simd_3same_int(s, insn);
6858 /* C3.6.17 AdvSIMD two reg misc
6859 * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
6860 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6861 * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
6862 * +---+---+---+-----------+------+-----------+--------+-----+------+------+
6864 static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
6866 unsupported_encoding(s, insn);
6869 /* C3.6.18 AdvSIMD vector x indexed element
6870 * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
6871 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
6872 * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
6873 * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
6875 static void disas_simd_indexed_vector(DisasContext *s, uint32_t insn)
6877 unsupported_encoding(s, insn);
6880 /* C3.6.19 Crypto AES
6881 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
6882 * +-----------------+------+-----------+--------+-----+------+------+
6883 * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
6884 * +-----------------+------+-----------+--------+-----+------+------+
6886 static void disas_crypto_aes(DisasContext *s, uint32_t insn)
6888 unsupported_encoding(s, insn);
6891 /* C3.6.20 Crypto three-reg SHA
6892 * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
6893 * +-----------------+------+---+------+---+--------+-----+------+------+
6894 * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
6895 * +-----------------+------+---+------+---+--------+-----+------+------+
6897 static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
6899 unsupported_encoding(s, insn);
6902 /* C3.6.21 Crypto two-reg SHA
6903 * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
6904 * +-----------------+------+-----------+--------+-----+------+------+
6905 * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
6906 * +-----------------+------+-----------+--------+-----+------+------+
6908 static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
6910 unsupported_encoding(s, insn);
6913 /* C3.6 Data processing - SIMD, inc Crypto
6915 * As the decode gets a little complex we are using a table-based
6916 * approach for this part of the decode.
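/* A row matches when (insn & mask) == pattern; lookup_disas_fn() is
 * assumed to scan the rows in order, returning the first hit, with the
 * all-zeroes entry terminating the table.
 */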
6918 static const AArch64DecodeTable data_proc_simd[] = {
6919 /* pattern , mask , fn */
6920 { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
6921 { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
6922 { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
6923 { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
6924 { 0x0e000400, 0x9fe08400, disas_simd_copy },
6925 { 0x0f000000, 0x9f000400, disas_simd_indexed_vector },
6926 /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
6927 { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
6928 { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
6929 { 0x0e000000, 0xbf208c00, disas_simd_tb },
6930 { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
6931 { 0x2e000000, 0xbf208400, disas_simd_ext },
6932 { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
6933 { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
6934 { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
6935 { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
6936 { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
6937 { 0x5f000000, 0xdf000400, disas_simd_scalar_indexed },
6938 { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
6939 { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
6940 { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
6941 { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
6942 { 0x00000000, 0x00000000, NULL }
6945 static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
6947 /* Note that this is called with all non-FP cases from
6948 * table C3-6 so it must UNDEF for entries not specifically
6949 * allocated to instructions in that table.
6951 AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
6955 unallocated_encoding(s);
/* C3.6 Data processing - SIMD and floating point */
static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
        disas_data_proc_fp(s, insn);
    } else {
        /* SIMD, including crypto */
        disas_data_proc_simd(s, insn);
    }
}

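/* Bits [28:25] of an A64 instruction form the major opcode field that
 * table C3-1 uses to carve up the encoding space; extract32(insn, 25, 4)
 * below extracts exactly that field. For example, NOP encodes as
 * 0xd503201f, and (0xd503201f >> 25) & 0xf is 0xa, which selects the
 * branch/exception/system case.
 */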
/* C3.1 A64 instruction index by encoding */
static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t insn;

    insn = arm_ldl_code(env, s->pc, s->bswap_code);
    s->insn = insn;
    s->pc += 4;

    switch (extract32(insn, 25, 4)) {
    case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
        unallocated_encoding(s);
        break;
    case 0x8: case 0x9: /* Data processing - immediate */
        disas_data_proc_imm(s, insn);
        break;
    case 0xa: case 0xb: /* Branch, exception generation and system insns */
        disas_b_exc_sys(s, insn);
        break;
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xe: /* Loads and stores */
        disas_ldst(s, insn);
        break;
    case 0x5:
    case 0xd: /* Data processing - register */
        disas_data_proc_reg(s, insn);
        break;
    case 0x7:
    case 0xf: /* Data processing - SIMD and floating point */
        disas_data_proc_simd_fp(s, insn);
        break;
    default:
        assert(FALSE); /* all 16 cases should be handled above */
        break;
    }

    /* if we allocated any temporaries, free them here */
    free_tmp_a64(s);
}

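/* Translate a block's worth of instructions: decode A64 insns one at a
 * time into TCG ops until something forces us to stop (a branch, a page
 * boundary, a full opcode buffer, single-stepping or the icount budget),
 * then emit the epilogue for the TranslationBlock.
 */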
void gen_intermediate_code_internal_a64(ARMCPU *cpu,
                                        TranslationBlock *tb,
                                        bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 1;
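    /* Most of the fields zeroed below only have meaning for the A32/T32
     * translator; the DisasContext type is shared, so give them inert
     * values here.
     */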
    dc->thumb = 0;
    dc->bswap_code = 0;
    dc->condexec_mask = 0;
    dc->condexec_cond = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = 0;
#endif
    dc->vfp_enabled = 0;
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
    dc->current_pl = arm_current_pl(env);

    init_tmp_a64_array(dc);

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();

    tcg_clear_temp_count();

    do {
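        /* Translate one instruction per loop iteration: breakpoint check,
         * search_pc bookkeeping, icount I/O handling, then the decode
         * itself.
         */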
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                     * invalidate this TB.
                     */
                    dc->pc += 4;
                    goto done_generating;
                }
            }
        }

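        /* When search_pc is set we are re-translating an existing TB in
         * order to map a host code pointer back to a guest PC, so record
         * the guest PC and instruction count against each op generated.
         */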
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        disas_a64_insn(env, dc);

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place.
         */
        num_insns++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

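    /* Under icount, gen_io_start() above opened an I/O window for the
     * final instruction of the TB; close it again before finishing the
     * block.
     */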
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
        /* Note that this means single stepping WFI doesn't halt the CPU.
         * For conditional branch insns this is harmless unreachable code as
         * gen_goto_tb() has already handled emitting the debug exception
         * (and thus a tb-jump is not possible when singlestepping).
         */
        assert(dc->is_jmp != DISAS_TB_JUMP);
        if (dc->is_jmp != DISAS_JUMP) {
            gen_a64_set_pc_im(dc->pc);
        }
        gen_exception(EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_UPDATE:
            gen_a64_set_pc_im(dc->pc);
            /* fall through */
        case DISAS_JUMP:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
        case DISAS_EXC:
        case DISAS_SWI:
            break;
        case DISAS_WFI:
            /* This is a special case because we don't want to just halt the
             * CPU if trying to debug across a WFI.
             */
            gen_helper_wfi(cpu_env);
            break;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}