4 * Copyright (c) 2007 CodeSourcery
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "exec/helper-proto.h"
22 #include "exec/exec-all.h"
23 #include "exec/cpu_ldst.h"
24 #include "hw/semihosting/semihost.h"
26 #if defined(CONFIG_USER_ONLY)
/*
 * User-mode emulation interrupt hook: simply clear the pending
 * exception index (exceptions are handled by the user-mode loop).
 */
28 void m68k_cpu_do_interrupt(CPUState *cs)
30 cs->exception_index = -1;
33 static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
/*
 * ColdFire RTE: pop the format word and return PC from the exception
 * frame, realign SP using the alignment bits saved in format bits
 * 29:28, then restore SR from the format word.
 */
39 static void cf_rte(CPUM68KState *env)
45 fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
46 env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
/* Restore the SP alignment that was encoded in the frame on entry. */
47 sp |= (fmt >> 28) & 3;
48 env->aregs[7] = sp + 8;
50 cpu_m68k_set_sr(env, fmt);
/*
 * 680x0 RTE: pop SR and return PC from the supervisor stack; on CPUs
 * other than the 68000 (QUAD_MULDIV feature) also read the frame
 * format word to determine the frame size/type.
 * NOTE(review): interior lines (SP bookkeeping, frame-format switch)
 * are not visible in this chunk — the two cpu_m68k_set_sr() calls
 * below presumably sit on different control paths; confirm against
 * the full source.
 */
53 static void m68k_rte(CPUM68KState *env)
61 sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
63 env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
65 if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
66 /* all except 68000 */
67 fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
74 cpu_m68k_set_sr(env, sr);
89 cpu_m68k_set_sr(env, sr);
/*
 * Map an exception index to a human-readable name for CPU_LOG_INT
 * logging.  Returns a static string; never NULL for the cases shown.
 * Fix: "Unitialized Interruot" was a double typo for
 * "Uninitialized Interrupt" (vector 15 on 680x0/ColdFire).
 */
92 static const char *m68k_exception_name(int index)
96 return "Access Fault";
98 return "Address Error";
100 return "Illegal Instruction";
102 return "Divide by Zero";
106 return "FTRAPcc, TRAPcc, TRAPV";
108 return "Privilege Violation";
115 case EXCP_DEBEGBP: /* 68020/030 only */
116 return "Copro Protocol Violation";
118 return "Format Error";
119 case EXCP_UNINITIALIZED:
120 return "Uninitialized Interrupt";
122 return "Spurious Interrupt";
123 case EXCP_INT_LEVEL_1:
124 return "Level 1 Interrupt";
125 case EXCP_INT_LEVEL_1 + 1:
126 return "Level 2 Interrupt";
127 case EXCP_INT_LEVEL_1 + 2:
128 return "Level 3 Interrupt";
129 case EXCP_INT_LEVEL_1 + 3:
130 return "Level 4 Interrupt";
131 case EXCP_INT_LEVEL_1 + 4:
132 return "Level 5 Interrupt";
133 case EXCP_INT_LEVEL_1 + 5:
134 return "Level 6 Interrupt";
135 case EXCP_INT_LEVEL_1 + 6:
136 return "Level 7 Interrupt";
157 case EXCP_TRAP0 + 10:
159 case EXCP_TRAP0 + 11:
161 case EXCP_TRAP0 + 12:
163 case EXCP_TRAP0 + 13:
165 case EXCP_TRAP0 + 14:
167 case EXCP_TRAP0 + 15:
170 return "FP Branch/Set on unordered condition";
172 return "FP Inexact Result";
174 return "FP Divide by Zero";
176 return "FP Underflow";
178 return "FP Operand Error";
180 return "FP Overflow";
182 return "FP Signaling NAN";
184 return "FP Unimplemented Data Type";
185 case EXCP_MMU_CONF: /* 68030/68851 only */
186 return "MMU Configuration Error";
187 case EXCP_MMU_ILLEGAL: /* 68851 only */
188 return "MMU Illegal Operation";
189 case EXCP_MMU_ACCESS: /* 68851 only */
190 return "MMU Access Level Violation";
192 return "User Defined Vector";
/*
 * ColdFire exception/interrupt entry.  Handles the semihosting HALT
 * sequence (nop; movec %d0,CACR encoding 0x4e71 / 0x4e7bf000), logs
 * the event, raises the interrupt mask for hardware interrupts,
 * pushes a format/PC frame, and jumps through the vector table.
 */
197 static void cf_interrupt_all(CPUM68KState *env, int is_hw)
199 CPUState *cs = env_cpu(env);
210 switch (cs->exception_index) {
212 /* Return from an exception. */
/* Detect the magic semihosting instruction pair before halting. */
216 if (semihosting_enabled()
217 && (env->sr & SR_S) != 0
218 && (env->pc & 3) == 0
219 && cpu_lduw_code(env, env->pc - 4) == 0x4e71
220 && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
222 do_m68k_semihosting(env, env->dregs[0]);
226 cs->exception_index = EXCP_HLT;
230 if (cs->exception_index >= EXCP_TRAP0
231 && cs->exception_index <= EXCP_TRAP15) {
232 /* Move the PC after the trap instruction. */
237 vector = cs->exception_index << 2;
/* SR with live condition codes folded back in, for the saved frame. */
239 sr = env->sr | cpu_m68k_get_ccr(env);
240 if (qemu_loglevel_mask(CPU_LOG_INT)) {
242 qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
243 ++count, m68k_exception_name(cs->exception_index),
244 vector, env->pc, env->aregs[7], sr);
/* Raise the interrupt mask to the pending level on hardware IRQs. */
253 env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
/* Record the SP alignment in the frame so RTE can restore it. */
258 fmt |= (sp & 3) << 28;
260 /* ??? This could cause MMU faults. */
263 cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
265 cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
267 /* Jump to vector. */
268 env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
/*
 * Push a 680x0 exception stack frame at *sp (updated in place):
 * optionally an effective address (frame-format dependent), then the
 * format|vector word, the return PC, and finally SR.  The 68000
 * (lacking QUAD_MULDIV) gets the short frame without a format word.
 * NOTE(review): the per-format switch lines are elided in this chunk.
 */
271 static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
272 uint16_t format, uint16_t sr,
273 uint32_t addr, uint32_t retaddr)
275 if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
276 /* all except 68000 */
277 CPUState *cs = env_cpu(env);
281 cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
283 cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
288 cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
/* Format (high nibble) combined with the vector offset. */
292 cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
296 cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
298 cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
/*
 * 680x0 exception/interrupt entry.  Computes the vector, updates SR
 * (supervisor mode, trace suppressed, interrupt mask raised per
 * MC68040UM/AD ch. 9.3.10), then pushes the frame format appropriate
 * to the exception: format 7 (access error, with MMU write-back and
 * special-status words), format 2 (address error and instruction
 * exceptions), or format 0/1 (interrupts; format 1 is the throwaway
 * frame pushed when switching off the master stack, SR_M).
 * Finally jumps through the vector table at VBR.
 */
301 static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
303 CPUState *cs = env_cpu(env);
312 switch (cs->exception_index) {
314 /* Return from an exception. */
317 case EXCP_TRAP0 ... EXCP_TRAP15:
318 /* Move the PC after the trap instruction. */
324 vector = cs->exception_index << 2;
326 sr = env->sr | cpu_m68k_get_ccr(env);
327 if (qemu_loglevel_mask(CPU_LOG_INT)) {
329 qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
330 ++count, m68k_exception_name(cs->exception_index),
331 vector, env->pc, env->aregs[7], sr);
335 * MC68040UM/AD, chapter 9.3.10
338 /* "the processor first make an internal copy" */
340 /* "set the mode to supervisor" */
342 /* "suppress tracing" */
344 /* "sets the processor interrupt mask" */
346 sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
348 cpu_m68k_set_sr(env, sr);
352 if (cs->exception_index == EXCP_ACCESS) {
/* A fault while building the access-error frame is fatal. */
353 if (env->mmu.fault) {
354 cpu_abort(cs, "DOUBLE MMU FAULT\n");
356 env->mmu.fault = true;
/* Push the (mostly zeroed) format-7 access-error frame fields. */
359 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
362 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
365 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
366 /* write back 1 / push data 0 */
368 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
369 /* write back 1 address */
371 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
372 /* write back 2 data */
374 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
375 /* write back 2 address */
377 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
378 /* write back 3 data */
380 cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
381 /* write back 3 address */
383 cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
386 cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
387 /* write back 1 status */
389 cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
390 /* write back 2 status */
392 cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
393 /* write back 3 status */
395 cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
396 /* special status word */
398 cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
399 /* effective address */
401 cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
403 do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
404 env->mmu.fault = false;
405 if (qemu_loglevel_mask(CPU_LOG_INT)) {
407 "ssw: %08x ea: %08x sfc: %d dfc: %d\n",
408 env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc);
410 } else if (cs->exception_index == EXCP_ADDRESS) {
411 do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
412 } else if (cs->exception_index == EXCP_ILLEGAL ||
413 cs->exception_index == EXCP_DIV0 ||
414 cs->exception_index == EXCP_CHK ||
415 cs->exception_index == EXCP_TRAPCC ||
416 cs->exception_index == EXCP_TRACE) {
417 /* FIXME: addr is not only env->pc */
418 do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr);
419 } else if (is_hw && oldsr & SR_M &&
420 cs->exception_index >= EXCP_SPURIOUS &&
421 cs->exception_index <= EXCP_INT_LEVEL_7) {
422 do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
/* Leave the master-stack mode and push a throwaway format-1 frame
 * on the interrupt stack. */
425 cpu_m68k_set_sr(env, sr &= ~SR_M);
426 sp = env->aregs[7] & ~1;
427 do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
429 do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
433 /* Jump to vector. */
434 env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
/*
 * Dispatch exception handling to the 680x0 or ColdFire implementation
 * based on the CPU feature set.
 */
437 static void do_interrupt_all(CPUM68KState *env, int is_hw)
439 if (m68k_feature(env, M68K_FEATURE_M68000)) {
440 m68k_interrupt_all(env, is_hw);
443 cf_interrupt_all(env, is_hw);
/* System-mode entry point for software exceptions (is_hw = 0). */
446 void m68k_cpu_do_interrupt(CPUState *cs)
448 M68kCPU *cpu = M68K_CPU(cs);
449 CPUM68KState *env = &cpu->env;
451 do_interrupt_all(env, 0);
/* Entry point for hardware interrupts (is_hw = 1). */
454 static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
456 do_interrupt_all(env, 1);
/*
 * Bus-transaction failure hook.  Restores the guest state for the
 * faulting instruction, then on 68040 synthesizes the special status
 * word (SSW): ATC bit, transfer-modifier field (supervisor/user,
 * code/data), access size, and read/write flag, before raising
 * EXCP_ACCESS.
 */
459 void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
460 unsigned size, MMUAccessType access_type,
461 int mmu_idx, MemTxAttrs attrs,
462 MemTxResult response, uintptr_t retaddr)
464 M68kCPU *cpu = M68K_CPU(cs);
465 CPUM68KState *env = &cpu->env;
/* Resynchronize PC/flags to the faulting instruction. */
467 cpu_restore_state(cs, retaddr, true);
469 if (m68k_feature(env, M68K_FEATURE_M68040)) {
471 env->mmu.ssw |= M68K_ATC_040;
472 /* FIXME: manage MMU table access error */
473 env->mmu.ssw &= ~M68K_TM_040;
474 if (env->sr & SR_S) { /* SUPERVISOR */
475 env->mmu.ssw |= M68K_TM_040_SUPER;
477 if (access_type == MMU_INST_FETCH) { /* instruction or data */
478 env->mmu.ssw |= M68K_TM_040_CODE;
480 env->mmu.ssw |= M68K_TM_040_DATA;
482 env->mmu.ssw &= ~M68K_BA_SIZE_MASK;
485 env->mmu.ssw |= M68K_BA_SIZE_BYTE;
488 env->mmu.ssw |= M68K_BA_SIZE_WORD;
491 env->mmu.ssw |= M68K_BA_SIZE_LONG;
/* RW bit is set for reads (loads and instruction fetches). */
495 if (access_type != MMU_DATA_STORE) {
496 env->mmu.ssw |= M68K_RW_040;
501 cs->exception_index = EXCP_ACCESS;
/*
 * Poll for a deliverable hardware interrupt: take it only when the
 * pending level exceeds the current SR interrupt mask.  The vector was
 * saved by the interrupt controller at assert time (no IACK cycle is
 * emulated).
 */
507 bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
509 M68kCPU *cpu = M68K_CPU(cs);
510 CPUM68KState *env = &cpu->env;
512 if (interrupt_request & CPU_INTERRUPT_HARD
513 && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
515 * Real hardware gets the interrupt vector via an IACK cycle
516 * at this point. Current emulated hardware doesn't rely on
517 * this, so we provide/save the vector when the interrupt is
520 cs->exception_index = env->pending_vector;
521 do_interrupt_m68k_hardirq(env);
/*
 * Raise guest exception TT, unwinding to host return address RADDR so
 * the guest state is restored to the faulting instruction.  Does not
 * return.
 */
527 static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
529 CPUState *cs = env_cpu(env);
531 cs->exception_index = tt;
532 cpu_loop_exit_restore(cs, raddr);
/* Raise guest exception TT with no host unwind info.  Does not return. */
535 static void raise_exception(CPUM68KState *env, int tt)
537 raise_exception_ra(env, tt, 0);
/* TCG helper: raise guest exception TT from generated code. */
540 void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
542 raise_exception(env, tt);
/*
 * DIVU.W: unsigned 32/16 divide of %Dn by DEN.  Raises EXCP_DIV0 on a
 * zero divisor.  On success packs remainder:quotient (16:16) back into
 * the register and sets N/Z from the 16-bit quotient; C is always
 * cleared.
 */
545 void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
547 uint32_t num = env->dregs[destr];
551 raise_exception_ra(env, EXCP_DIV0, GETPC());
556 env->cc_c = 0; /* always cleared, even if overflow */
560 * real 68040 keeps N and unset Z on overflow,
561 * whereas documentation says "undefined"
566 env->dregs[destr] = deposit32(quot, 16, 16, rem);
567 env->cc_z = (int16_t)quot;
568 env->cc_n = (int16_t)quot;
/*
 * DIVS.W: signed 32/16 divide of %Dn by DEN.  Raises EXCP_DIV0 on a
 * zero divisor; on 16-bit quotient overflow the register is left
 * unmodified.  Otherwise packs remainder:quotient (16:16) and sets
 * N/Z from the quotient; C is always cleared.
 */
572 void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
574 int32_t num = env->dregs[destr];
578 raise_exception_ra(env, EXCP_DIV0, GETPC());
583 env->cc_c = 0; /* always cleared, even if overflow */
584 if (quot != (int16_t)quot) {
586 /* nothing else is modified */
588 * real 68040 keeps N and unset Z on overflow,
589 * whereas documentation says "undefined"
594 env->dregs[destr] = deposit32(quot, 16, 16, rem);
595 env->cc_z = (int16_t)quot;
596 env->cc_n = (int16_t)quot;
/*
 * DIVU.L: unsigned 32/32 divide; quotient to %Dq (numr), remainder to
 * %Dr (regr).  Raises EXCP_DIV0 on zero divisor.  ColdFire ISA_A
 * orders the writes so the quotient wins when Dq == Dr.
 */
600 void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
602 uint32_t num = env->dregs[numr];
606 raise_exception_ra(env, EXCP_DIV0, GETPC());
616 if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
618 env->dregs[numr] = quot;
620 env->dregs[regr] = rem;
/* Dq is written last so the quotient survives when Dq == Dr. */
623 env->dregs[regr] = rem;
624 env->dregs[numr] = quot;
/*
 * DIVS.L: signed 32/32 divide; quotient to %Dq (numr), remainder to
 * %Dr (regr).  Raises EXCP_DIV0 on zero divisor.  Same register-write
 * ordering considerations as divul.
 */
628 void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
630 int32_t num = env->dregs[numr];
634 raise_exception_ra(env, EXCP_DIV0, GETPC());
644 if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
646 env->dregs[numr] = quot;
648 env->dregs[regr] = rem;
/* Dq is written last so the quotient survives when Dq == Dr. */
651 env->dregs[regr] = rem;
652 env->dregs[numr] = quot;
/*
 * DIVU.L (64/32): numerator is %Dr:%Dq (regr high, numr low).  Raises
 * EXCP_DIV0 on zero divisor; detects quotient overflow past 32 bits.
 * C is always cleared.
 */
656 void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
658 uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
663 raise_exception_ra(env, EXCP_DIV0, GETPC());
668 env->cc_c = 0; /* always cleared, even if overflow */
669 if (quot > 0xffffffffULL) {
672 * real 68040 keeps N and unset Z on overflow,
673 * whereas documentation says "undefined"
683 * If Dq and Dr are the same, the quotient is returned.
684 * therefore we set Dq last.
687 env->dregs[regr] = rem;
688 env->dregs[numr] = quot;
/*
 * DIVS.L (64/32): signed variant of divull; numerator is %Dr:%Dq.
 * Raises EXCP_DIV0 on zero divisor; detects quotient overflow beyond
 * int32 range.  C is always cleared.
 */
691 void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
693 int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
698 raise_exception_ra(env, EXCP_DIV0, GETPC());
703 env->cc_c = 0; /* always cleared, even if overflow */
704 if (quot != (int32_t)quot) {
707 * real 68040 keeps N and unset Z on overflow,
708 * whereas documentation says "undefined"
718 * If Dq and Dr are the same, the quotient is returned.
719 * therefore we set Dq last.
722 env->dregs[regr] = rem;
723 env->dregs[numr] = quot;
726 /* We're executing in a serial context -- no need to be atomic. */
/*
 * CAS2.W: double compare-and-swap on two 16-bit memory operands.
 * REGS encodes the four data-register numbers (Dc1/Dc2 compare,
 * Du1/Du2 update).  On a double match both updates are stored;
 * otherwise the loaded values are written back into the compare
 * registers.  Condition codes are set via CC_OP_CMPW.
 */
727 void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
729 uint32_t Dc1 = extract32(regs, 9, 3);
730 uint32_t Dc2 = extract32(regs, 6, 3);
731 uint32_t Du1 = extract32(regs, 3, 3);
732 uint32_t Du2 = extract32(regs, 0, 3);
733 int16_t c1 = env->dregs[Dc1];
734 int16_t c2 = env->dregs[Dc2];
735 int16_t u1 = env->dregs[Du1];
736 int16_t u2 = env->dregs[Du2];
738 uintptr_t ra = GETPC();
740 l1 = cpu_lduw_data_ra(env, a1, ra);
741 l2 = cpu_lduw_data_ra(env, a2, ra);
742 if (l1 == c1 && l2 == c2) {
743 cpu_stw_data_ra(env, a1, u1, ra);
744 cpu_stw_data_ra(env, a2, u2, ra);
754 env->cc_op = CC_OP_CMPW;
/* On mismatch (or after success) expose the loaded values in Dc1/Dc2. */
755 env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
756 env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
/*
 * CAS2.L: double compare-and-swap on two 32-bit memory operands.  In
 * a parallel (MTTCG) context the pair must be updated atomically: when
 * the two addresses form one aligned 64-bit word, a single 64-bit
 * cmpxchg is used; otherwise the insn is restarted serially via
 * cpu_loop_exit_atomic().  Serial contexts use plain loads/stores.
 */
759 static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
762 uint32_t Dc1 = extract32(regs, 9, 3);
763 uint32_t Dc2 = extract32(regs, 6, 3);
764 uint32_t Du1 = extract32(regs, 3, 3);
765 uint32_t Du2 = extract32(regs, 0, 3);
766 uint32_t c1 = env->dregs[Dc1];
767 uint32_t c2 = env->dregs[Dc2];
768 uint32_t u1 = env->dregs[Du1];
769 uint32_t u2 = env->dregs[Du2];
771 uintptr_t ra = GETPC();
772 #if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY)
773 int mmu_idx = cpu_mmu_index(env, 0);
778 /* We're executing in a parallel context -- must be atomic. */
779 #ifdef CONFIG_ATOMIC64
/* a1 is the aligned low address: one big-endian 64-bit cmpxchg. */
781 if ((a1 & 7) == 0 && a2 == a1 + 4) {
782 c = deposit64(c2, 32, 32, c1);
783 u = deposit64(u2, 32, 32, u1);
784 #ifdef CONFIG_USER_ONLY
785 l = helper_atomic_cmpxchgq_be(env, a1, c, u);
787 oi = make_memop_idx(MO_BEQ, mmu_idx);
788 l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
/* Mirror case: a2 is the aligned low address. */
792 } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
793 c = deposit64(c1, 32, 32, c2);
794 u = deposit64(u1, 32, 32, u2);
795 #ifdef CONFIG_USER_ONLY
796 l = helper_atomic_cmpxchgq_be(env, a2, c, u);
798 oi = make_memop_idx(MO_BEQ, mmu_idx);
799 l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
806 /* Tell the main loop we need to serialize this insn. */
807 cpu_loop_exit_atomic(env_cpu(env), ra);
810 /* We're executing in a serial context -- no need to be atomic. */
811 l1 = cpu_ldl_data_ra(env, a1, ra);
812 l2 = cpu_ldl_data_ra(env, a2, ra);
813 if (l1 == c1 && l2 == c2) {
814 cpu_stl_data_ra(env, a1, u1, ra);
815 cpu_stl_data_ra(env, a2, u2, ra);
826 env->cc_op = CC_OP_CMPL;
827 env->dregs[Dc1] = l1;
828 env->dregs[Dc2] = l2;
/* CAS2.L helper for a serial (non-parallel) execution context. */
831 void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
833 do_cas2l(env, regs, a1, a2, false);
/* CAS2.L helper for a parallel (MTTCG) execution context. */
836 void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
839 do_cas2l(env, regs, a1, a2, true);
/*
 * Normalize a memory bitfield operand: fold the signed bit offset into
 * the byte address, map length 0 to 32, and choose the smallest
 * power-of-two access (1/2/4/8 bytes) that covers the field, adjusting
 * the address so the access stays within a page where possible.  The
 * result positions the field in a 64-bit big-endian word.
 */
849 static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
853 /* Bound length; map 0 to 32. */
854 len = ((len - 1) & 31) + 1;
856 /* Note that ofs is signed. */
865 * Compute the number of bytes required (minus one) to
866 * satisfy the bitfield.
868 blen = (bofs + len - 1) / 8;
871 * Canonicalize the bit offset for data loaded into a 64-bit big-endian
872 * word. For the cases where BLEN is not a power of 2, adjust ADDR so
873 * that we can use the next power of two sized load without crossing a
874 * page boundary, unless the field itself crosses the boundary.
894 bofs += 8 * (addr & 3);
899 g_assert_not_reached();
902 return (struct bf_data){
/*
 * Load BLEN bytes (1/2/4/8, as chosen by bf_prep) from ADDR, zero
 * extended into a uint64_t.
 */
910 static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
915 return cpu_ldub_data_ra(env, addr, ra);
917 return cpu_lduw_data_ra(env, addr, ra);
920 return cpu_ldl_data_ra(env, addr, ra);
922 return cpu_ldq_data_ra(env, addr, ra);
924 g_assert_not_reached();
/*
 * Store the low BLEN bytes (1/2/4/8, as chosen by bf_prep) of DATA to
 * ADDR.
 */
928 static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
929 uint64_t data, uintptr_t ra)
933 cpu_stb_data_ra(env, addr, data, ra);
936 cpu_stw_data_ra(env, addr, data, ra);
940 cpu_stl_data_ra(env, addr, data, ra);
943 cpu_stq_data_ra(env, addr, data, ra);
946 g_assert_not_reached();
/*
 * BFEXTS on memory: sign-extend the (OFS, LEN) bitfield at ADDR.  The
 * field is shifted to the top of the 64-bit word, then arithmetic
 * right shift extracts and sign-extends it.
 */
950 uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
951 int32_t ofs, uint32_t len)
953 uintptr_t ra = GETPC();
954 struct bf_data d = bf_prep(addr, ofs, len);
955 uint64_t data = bf_load(env, d.addr, d.blen, ra);
957 return (int64_t)(data << d.bofs) >> (64 - d.len);
/*
 * BFEXTU on memory: zero-extend the (OFS, LEN) bitfield at ADDR.
 * Returns a 64-bit value carrying both results: the field aligned to
 * the top (CC_N for CC_OP_LOGIC) and the zero-extended value in the
 * low word.
 */
960 uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
961 int32_t ofs, uint32_t len)
963 uintptr_t ra = GETPC();
964 struct bf_data d = bf_prep(addr, ofs, len);
965 uint64_t data = bf_load(env, d.addr, d.blen, ra);
968 * Put CC_N at the top of the high word; put the zero-extended value
969 * at the bottom of the low word.
973 data |= data << (64 - d.len);
/*
 * BFINS on memory: insert the low LEN bits of VAL into the (OFS, LEN)
 * bitfield at ADDR via read-modify-write.  Returns VAL positioned at
 * the top of the word (CC_N for CC_OP_LOGIC).
 */
978 uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
979 int32_t ofs, uint32_t len)
981 uintptr_t ra = GETPC();
982 struct bf_data d = bf_prep(addr, ofs, len);
983 uint64_t data = bf_load(env, d.addr, d.blen, ra);
/* Mask covering the field within the loaded big-endian word. */
984 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
986 data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);
988 bf_store(env, d.addr, d.blen, data, ra);
990 /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */
991 return val << (32 - d.len);
/*
 * BFCHG on memory: complement the (OFS, LEN) bitfield at ADDR.
 * Returns the ORIGINAL field aligned for condition-code evaluation.
 */
994 uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
995 int32_t ofs, uint32_t len)
997 uintptr_t ra = GETPC();
998 struct bf_data d = bf_prep(addr, ofs, len);
999 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1000 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1002 bf_store(env, d.addr, d.blen, data ^ mask, ra);
1004 return ((data & mask) << d.bofs) >> 32;
/*
 * BFCLR on memory: clear the (OFS, LEN) bitfield at ADDR.  Returns the
 * ORIGINAL field aligned for condition-code evaluation.
 */
1007 uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
1008 int32_t ofs, uint32_t len)
1010 uintptr_t ra = GETPC();
1011 struct bf_data d = bf_prep(addr, ofs, len);
1012 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1013 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1015 bf_store(env, d.addr, d.blen, data & ~mask, ra);
1017 return ((data & mask) << d.bofs) >> 32;
/*
 * BFSET on memory: set all bits of the (OFS, LEN) bitfield at ADDR.
 * Returns the ORIGINAL field aligned for condition-code evaluation.
 */
1020 uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
1021 int32_t ofs, uint32_t len)
1023 uintptr_t ra = GETPC();
1024 struct bf_data d = bf_prep(addr, ofs, len);
1025 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1026 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1028 bf_store(env, d.addr, d.blen, data | mask, ra);
1030 return ((data & mask) << d.bofs) >> 32;
1033 uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
1035 return (n ? clz32(n) : len) + ofs;
/*
 * BFFFO on memory: locate the first set bit in the (OFS, LEN) bitfield
 * at ADDR.  The 64-bit return carries both results: the found offset
 * (FFO) in the low word and the field (CC_N) in the high word.
 */
1038 uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
1039 int32_t ofs, uint32_t len)
1041 uintptr_t ra = GETPC();
1042 struct bf_data d = bf_prep(addr, ofs, len);
1043 uint64_t data = bf_load(env, d.addr, d.blen, ra);
1044 uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
1045 uint64_t n = (data & mask) << d.bofs;
1046 uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);
1049 * Return FFO in the low word and N in the high word.
1050 * Note that because of MASK and the shift, the low word
/*
 * CHK: check VAL against upper bound UB; raise EXCP_CHK when VAL < 0
 * or VAL > UB.  Flag behavior mirrors a real MC68040 rather than the
 * (undefined) documented behavior — see the comment below.
 */
1056 void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
1060 * X: Not affected, C,V,Z: Undefined,
1061 * N: Set if val < 0; cleared if val > ub, undefined otherwise
1062 * We implement here values found from a real MC68040:
1063 * X,V,Z: Not affected
1064 * N: Set if val < 0; cleared if val >= 0
1065 * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise
1066 * if 0 > ub: set if val > ub and val < 0, cleared otherwise
1069 env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0;
1071 if (val < 0 || val > ub) {
1072 CPUState *cs = env_cpu(env);
1074 /* Recover PC and CC_OP for the beginning of the insn. */
1075 cpu_restore_state(cs, GETPC(), true);
1077 /* flags have been modified by gen_flush_flags() */
1078 env->cc_op = CC_OP_FLAGS;
1079 /* Adjust PC to end of the insn. */
1082 cs->exception_index = EXCP_CHK;
1087 void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
1091 * X: Not affected, N,V: Undefined,
1092 * Z: Set if val is equal to lb or ub
1093 * C: Set if val < lb or val > ub, cleared otherwise
1094 * We implement here values found from a real MC68040:
1095 * X,N,V: Not affected
1096 * Z: Set if val is equal to lb or ub
1097 * C: if lb <= ub: set if val < lb or val > ub, cleared otherwise
1098 * if lb > ub: set if val > ub and val < lb, cleared otherwise
1100 env->cc_z = val != lb && val != ub;
1101 env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;
1104 CPUState *cs = env_cpu(env);
1106 /* Recover PC and CC_OP for the beginning of the insn. */
1107 cpu_restore_state(cs, GETPC(), true);
1109 /* flags have been modified by gen_flush_flags() */
1110 env->cc_op = CC_OP_FLAGS;
1111 /* Adjust PC to end of the insn. */
1114 cs->exception_index = EXCP_CHK;