/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
24 /*****************************************************************************/
26 #if !defined(CONFIG_USER_ONLY)
27 #include "exec/softmmu_exec.h"
29 #define MMUSUFFIX _mmu
32 #include "exec/softmmu_template.h"
35 #include "exec/softmmu_template.h"
38 #include "exec/softmmu_template.h"
41 #include "exec/softmmu_template.h"
43 /* try to fill the TLB and return an exception if error. If retaddr is
44 NULL, it means that the function was called in C code (i.e. not
45 from generated code or from helper.c) */
46 /* XXX: fix it to restore all registers */
47 void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
52 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
53 if (unlikely(ret != 0)) {
54 if (likely(retaddr)) {
55 /* now we have a real cpu fault */
56 cpu_restore_state(env, retaddr);
/* #define DEBUG_HELPER */
/* Debug logging macro: compiles to a qemu_log call only when
   DEBUG_HELPER is defined, otherwise expands to nothing.  The extract
   had both #defines unconditionally, which would be a redefinition —
   the #ifdef/#else/#endif restores the intended selection.  */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
71 #ifndef CONFIG_USER_ONLY
72 static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
78 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
81 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
82 cpu_stb_data(env, dest, byte);
83 cpu_abort(env, "should never reach here");
85 dest_phys |= dest & ~TARGET_PAGE_MASK;
87 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
89 memset(dest_p, byte, len);
91 cpu_physical_memory_unmap(dest_p, 1, len, len);
94 static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
102 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
105 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
106 cpu_stb_data(env, dest, 0);
107 cpu_abort(env, "should never reach here");
109 dest_phys |= dest & ~TARGET_PAGE_MASK;
111 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
112 cpu_ldub_data(env, src);
113 cpu_abort(env, "should never reach here");
115 src_phys |= src & ~TARGET_PAGE_MASK;
117 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
118 src_p = cpu_physical_memory_map(src_phys, &len, 0);
120 memmove(dest_p, src_p, len);
122 cpu_physical_memory_unmap(dest_p, 1, len, len);
123 cpu_physical_memory_unmap(src_p, 0, len, len);
128 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
135 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
136 __func__, l, dest, src);
137 for (i = 0; i <= l; i++) {
138 x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
142 cpu_stb_data(env, dest + i, x);
148 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
155 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
156 __func__, l, dest, src);
158 #ifndef CONFIG_USER_ONLY
159 /* xor with itself is the same as memset(0) */
160 if ((l > 32) && (src == dest) &&
161 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
162 mvc_fast_memset(env, l + 1, dest, 0);
167 memset(g2h(dest), 0, l + 1);
172 for (i = 0; i <= l; i++) {
173 x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
177 cpu_stb_data(env, dest + i, x);
183 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
190 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
191 __func__, l, dest, src);
192 for (i = 0; i <= l; i++) {
193 x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
197 cpu_stb_data(env, dest + i, x);
203 void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
207 uint32_t l_64 = (l + 1) / 8;
209 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
210 __func__, l, dest, src);
212 #ifndef CONFIG_USER_ONLY
214 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
215 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
216 if (dest == (src + 1)) {
217 mvc_fast_memset(env, l + 1, dest, cpu_ldub_data(env, src));
219 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
220 mvc_fast_memmove(env, l + 1, dest, src);
225 if (dest == (src + 1)) {
226 memset(g2h(dest), cpu_ldub_data(env, src), l + 1);
229 memmove(g2h(dest), g2h(src), l + 1);
234 /* handle the parts that fit into 8-byte loads/stores */
235 if (dest != (src + 1)) {
236 for (i = 0; i < l_64; i++) {
237 cpu_stq_data(env, dest + x, cpu_ldq_data(env, src + x));
242 /* slow version crossing pages with byte accesses */
243 for (i = x; i <= l; i++) {
244 cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
248 /* compare unsigned byte arrays */
249 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
255 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
256 __func__, l, s1, s2);
257 for (i = 0; i <= l; i++) {
258 x = cpu_ldub_data(env, s1 + i);
259 y = cpu_ldub_data(env, s2 + i);
260 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
275 /* compare logical under mask */
276 uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
282 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
287 d = cpu_ldub_data(env, addr);
288 r = (r1 & 0xff000000UL) >> 24;
289 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
300 mask = (mask << 1) & 0xf;
307 static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
310 if (!(env->psw.mask & PSW_MASK_64)) {
316 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
325 return fix_address(env, r);
328 static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
330 return fix_address(env, env->regs[reg]);
333 /* search string (c is byte to search, r2 is string, r1 end of string) */
334 uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
340 str = fix_address(env, str);
341 end = fix_address(env, end);
343 /* Assume for now that R2 is unmodified. */
346 /* Lest we fail to service interrupts in a timely manner, limit the
347 amount of work we're willing to do. For now, lets cap at 8k. */
348 for (len = 0; len < 0x2000; ++len) {
349 if (str + len == end) {
350 /* Character not found. R1 & R2 are unmodified. */
354 v = cpu_ldub_data(env, str + len);
356 /* Character found. Set R1 to the location; R2 is unmodified. */
362 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
363 env->retxl = str + len;
368 /* unsigned string compare (c is string terminator) */
369 uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
374 s1 = fix_address(env, s1);
375 s2 = fix_address(env, s2);
377 /* Lest we fail to service interrupts in a timely manner, limit the
378 amount of work we're willing to do. For now, lets cap at 8k. */
379 for (len = 0; len < 0x2000; ++len) {
380 uint8_t v1 = cpu_ldub_data(env, s1 + len);
381 uint8_t v2 = cpu_ldub_data(env, s2 + len);
384 /* Equal. CC=0, and don't advance the registers. */
390 /* Unequal. CC={1,2}, and advance the registers. Note that
391 the terminator need not be zero, but the string that contains
392 the terminator is by definition "low". */
393 env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
394 env->retxl = s2 + len;
399 /* CPU-determined bytes equal; advance the registers. */
401 env->retxl = s2 + len;
406 void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
408 /* XXX missing r0 handling */
410 #ifdef CONFIG_USER_ONLY
411 memmove(g2h(r1), g2h(r2), TARGET_PAGE_SIZE);
413 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
417 /* string copy (c is string terminator) */
418 uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
423 d = fix_address(env, d);
424 s = fix_address(env, s);
426 /* Lest we fail to service interrupts in a timely manner, limit the
427 amount of work we're willing to do. For now, lets cap at 8k. */
428 for (len = 0; len < 0x2000; ++len) {
429 uint8_t v = cpu_ldub_data(env, s + len);
430 cpu_stb_data(env, d + len, v);
432 /* Complete. Set CC=1 and advance R1. */
439 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
441 env->retxl = s + len;
445 /* compare and swap 64-bit */
446 uint64_t HELPER(csg)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
448 /* FIXME: locking? */
449 uint64_t v2 = cpu_ldq_data(env, a2);
451 cpu_stq_data(env, a2, r3);
460 /* compare double and swap 64-bit */
461 uint32_t HELPER(cdsg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
463 /* FIXME: locking? */
465 uint64_t v2_hi = cpu_ldq_data(env, a2);
466 uint64_t v2_lo = cpu_ldq_data(env, a2 + 8);
467 uint64_t v1_hi = env->regs[r1];
468 uint64_t v1_lo = env->regs[r1 + 1];
470 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
472 cpu_stq_data(env, a2, env->regs[r3]);
473 cpu_stq_data(env, a2 + 8, env->regs[r3 + 1]);
476 env->regs[r1] = v2_hi;
477 env->regs[r1 + 1] = v2_lo;
483 /* compare and swap 32-bit */
484 uint64_t HELPER(cs)(CPUS390XState *env, uint64_t r1, uint64_t a2, uint64_t r3)
486 /* FIXME: locking? */
487 uint32_t v2 = cpu_ldl_data(env, a2);
488 if ((uint32_t)r1 == v2) {
489 cpu_stl_data(env, a2, (uint32_t)r3);
498 static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
501 int pos = 24; /* top of the lower half of r1 */
502 uint64_t rmask = 0xff000000ULL;
509 env->regs[r1] &= ~rmask;
510 val = cpu_ldub_data(env, address);
511 if ((val & 0x80) && !ccd) {
515 if (val && cc == 0) {
518 env->regs[r1] |= (uint64_t)val << pos;
521 mask = (mask << 1) & 0xf;
529 /* execute instruction
530 this instruction executes an insn modified with the contents of r1
531 it does not change the executed instruction in memory
532 it does not change the program counter
533 in other words: tricky...
534 currently implemented by interpreting the cases it is most commonly used in
/* NOTE(review): this extract is missing several lines of the function
   (e.g. the 'l = v1 & 0xff' length extraction, 'd2 = insn2 & 0xfff',
   the switch case labels, and the trailing 'return cc') — verify the
   complete body against upstream before relying on it.  */
536 uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
537 uint64_t addr, uint64_t ret)
/* Fetch the 2-byte opcode of the target instruction at addr.  */
539 uint16_t insn = cpu_lduw_code(env, addr);
541 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
/* SS-format storage-to-storage instruction (0xDx): decode base/disp
   fields from the following 4 bytes.  */
543 if ((insn & 0xf0ff) == 0xd000) {
544 uint32_t l, insn2, b1, b2, d1, d2;
547 insn2 = cpu_ldl_code(env, addr + 2);
548 b1 = (insn2 >> 28) & 0xf;
549 b2 = (insn2 >> 12) & 0xf;
550 d1 = (insn2 >> 16) & 0xfff;
/* Dispatch on the second opcode nibble: 0x200 MVC, 0x500 CLC,
   0x700 XC, 0xc00 TR — interpreted via the sibling helpers.  */
552 switch (insn & 0xf00) {
554 helper_mvc(env, l, get_address(env, 0, b1, d1),
555 get_address(env, 0, b2, d2));
558 cc = helper_clc(env, l, get_address(env, 0, b1, d1),
559 get_address(env, 0, b2, d2));
562 cc = helper_xc(env, l, get_address(env, 0, b1, d1),
563 get_address(env, 0, b2, d2));
566 helper_tr(env, l, get_address(env, 0, b1, d1),
567 get_address(env, 0, b2, d2));
573 } else if ((insn & 0xff00) == 0x0a00) {
574 /* supervisor call */
575 HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
/* Rewind the PSW to the EXECUTE insn and raise the SVC with the
   OR-modified immediate as the service code.  */
576 env->psw.addr = ret - 4;
577 env->int_svc_code = (insn | v1) & 0xff;
578 env->int_svc_ilen = 4;
579 helper_exception(env, EXCP_SVC);
/* 0xBF = ICM: decode the RS format and interpret via helper_icm.  */
580 } else if ((insn & 0xff00) == 0xbf00) {
581 uint32_t insn2, r1, r3, b2, d2;
583 insn2 = cpu_ldl_code(env, addr + 2);
584 r1 = (insn2 >> 20) & 0xf;
585 r3 = (insn2 >> 16) & 0xf;
586 b2 = (insn2 >> 12) & 0xf;
588 cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
/* Anything else is not interpreted — abort emulation.  */
591 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
597 /* load access registers r1 to r3 from memory at a2 */
598 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
602 for (i = r1;; i = (i + 1) % 16) {
603 env->aregs[i] = cpu_ldl_data(env, a2);
612 /* store access registers r1 to r3 in memory at a2 */
613 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
617 for (i = r1;; i = (i + 1) % 16) {
618 cpu_stl_data(env, a2, env->aregs[i]);
628 uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
630 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
631 uint64_t dest = get_address_31fix(env, r1);
632 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
633 uint64_t src = get_address_31fix(env, r2);
634 uint8_t pad = src >> 24;
638 if (destlen == srclen) {
640 } else if (destlen < srclen) {
646 if (srclen > destlen) {
650 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
651 v = cpu_ldub_data(env, src);
652 cpu_stb_data(env, dest, v);
655 for (; destlen; dest++, destlen--) {
656 cpu_stb_data(env, dest, pad);
659 env->regs[r1 + 1] = destlen;
660 /* can't use srclen here, we trunc'ed it */
661 env->regs[r2 + 1] -= src - env->regs[r2];
662 env->regs[r1] = dest;
668 /* move long extended another memcopy insn with more bells and whistles */
669 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
672 uint64_t destlen = env->regs[r1 + 1];
673 uint64_t dest = env->regs[r1];
674 uint64_t srclen = env->regs[r3 + 1];
675 uint64_t src = env->regs[r3];
676 uint8_t pad = a2 & 0xff;
680 if (!(env->psw.mask & PSW_MASK_64)) {
681 destlen = (uint32_t)destlen;
682 srclen = (uint32_t)srclen;
687 if (destlen == srclen) {
689 } else if (destlen < srclen) {
695 if (srclen > destlen) {
699 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
700 v = cpu_ldub_data(env, src);
701 cpu_stb_data(env, dest, v);
704 for (; destlen; dest++, destlen--) {
705 cpu_stb_data(env, dest, pad);
708 env->regs[r1 + 1] = destlen;
709 /* can't use srclen here, we trunc'ed it */
710 /* FIXME: 31-bit mode! */
711 env->regs[r3 + 1] -= src - env->regs[r3];
712 env->regs[r1] = dest;
718 /* compare logical long extended memcompare insn with padding */
719 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
722 uint64_t destlen = env->regs[r1 + 1];
723 uint64_t dest = get_address_31fix(env, r1);
724 uint64_t srclen = env->regs[r3 + 1];
725 uint64_t src = get_address_31fix(env, r3);
726 uint8_t pad = a2 & 0xff;
727 uint8_t v1 = 0, v2 = 0;
730 if (!(destlen || srclen)) {
734 if (srclen > destlen) {
738 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
739 v1 = srclen ? cpu_ldub_data(env, src) : pad;
740 v2 = destlen ? cpu_ldub_data(env, dest) : pad;
742 cc = (v1 < v2) ? 1 : 2;
747 env->regs[r1 + 1] = destlen;
748 /* can't use srclen here, we trunc'ed it */
749 env->regs[r3 + 1] -= src - env->regs[r3];
750 env->regs[r1] = dest;
757 uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
758 uint64_t src, uint64_t src_len)
760 uint64_t max_len, len;
761 uint64_t cksm = (uint32_t)r1;
763 /* Lest we fail to service interrupts in a timely manner, limit the
764 amount of work we're willing to do. For now, lets cap at 8k. */
765 max_len = (src_len > 0x2000 ? 0x2000 : src_len);
767 /* Process full words as available. */
768 for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
769 cksm += (uint32_t)cpu_ldl_data(env, src);
772 switch (max_len - len) {
774 cksm += cpu_ldub_data(env, src) << 24;
778 cksm += cpu_lduw_data(env, src) << 16;
782 cksm += cpu_lduw_data(env, src) << 16;
783 cksm += cpu_ldub_data(env, src + 2) << 8;
788 /* Fold the carry from the checksum. Note that we can see carry-out
789 during folding more than once (but probably not more than twice). */
790 while (cksm > 0xffffffffull) {
791 cksm = (uint32_t)cksm + (cksm >> 32);
794 /* Indicate whether or not we've processed everything. */
795 env->cc_op = (len == src_len ? 0 : 3);
797 /* Return both cksm and processed length. */
/* UNPK: convert packed-decimal source digits into zoned format at dest.
   NOTE(review): this extract is missing lines of the function body
   (the 'b' declaration, the dest/src pointer adjustments, the 0xf0
   zone-oring and the loop tail) — verify against upstream.  */
802 void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
/* Operand lengths are encoded in the two nibbles of 'len'.  */
805 int len_dest = len >> 4;
806 int len_src = len & 0xf;
808 int second_nibble = 0;
813 /* last byte is special, it only flips the nibbles */
814 b = cpu_ldub_data(env, src);
815 cpu_stb_data(env, dest, (b << 4) | (b >> 4));
819 /* now pad every nibble with 0xf0 */
821 while (len_dest > 0) {
822 uint8_t cur_byte = 0;
/* presumably this load is guarded by 'len_src > 0' upstream — confirm */
825 cur_byte = cpu_ldub_data(env, src);
831 /* only advance one nibble at a time */
837 second_nibble = !second_nibble;
/* keep only the low nibble before the zone bits are added */
840 cur_byte = (cur_byte & 0xf);
844 cpu_stb_data(env, dest, cur_byte);
848 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
853 for (i = 0; i <= len; i++) {
854 uint8_t byte = cpu_ldub_data(env, array + i);
855 uint8_t new_byte = cpu_ldub_data(env, trans + byte);
857 cpu_stb_data(env, array + i, new_byte);
861 #if !defined(CONFIG_USER_ONLY)
862 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
867 for (i = r1;; i = (i + 1) % 16) {
868 env->cregs[i] = cpu_ldq_data(env, src);
869 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
870 i, src, env->cregs[i]);
871 src += sizeof(uint64_t);
881 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
886 for (i = r1;; i = (i + 1) % 16) {
887 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) |
888 cpu_ldl_data(env, src);
889 src += sizeof(uint32_t);
899 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
904 for (i = r1;; i = (i + 1) % 16) {
905 cpu_stq_data(env, dest, env->cregs[i]);
906 dest += sizeof(uint64_t);
914 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
919 for (i = r1;; i = (i + 1) % 16) {
920 cpu_stl_data(env, dest, env->cregs[i]);
921 dest += sizeof(uint32_t);
929 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
936 /* insert storage key extended */
937 uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
939 uint64_t addr = get_address(env, 0, 0, r2);
941 if (addr > ram_size) {
945 return env->storage_keys[addr / TARGET_PAGE_SIZE];
948 /* set storage key extended */
949 void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
951 uint64_t addr = get_address(env, 0, 0, r2);
953 if (addr > ram_size) {
957 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
960 /* reset reference bit extended */
961 uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
970 key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
971 re = key & (SK_R | SK_C);
972 env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
977 * 0 Reference bit zero; change bit zero
978 * 1 Reference bit zero; change bit one
979 * 2 Reference bit one; change bit zero
980 * 3 Reference bit one; change bit one
986 /* compare and swap and purge */
987 uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint32_t r2)
990 uint32_t o1 = env->regs[r1];
991 uint64_t a2 = get_address_31fix(env, r2) & ~3ULL;
992 uint32_t o2 = cpu_ldl_data(env, a2);
995 cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
996 if (env->regs[r2] & 0x3) {
997 /* flush TLB / ALB */
1002 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
/* Copy up to 256 bytes between two address spaces (used by MVCS/MVCP),
   translating each operand under its own ASC mode and storing byte by
   byte through physical addresses; recurses when a page boundary is
   crossed.  NOTE(review): this extract is missing lines (the l==0/l>256
   cc handling, the mmu_translate failure paths, the recursion 'break'
   and the final 'return cc') — verify against upstream.  */
1009 static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
1010 uint64_t mode1, uint64_t a2, uint64_t mode2)
1012 target_ulong src, dest;
1013 int flags, cc = 0, i;
/* presumably the missing branch handles l == 0 — confirm upstream */
1017 } else if (l > 256) {
/* translate the destination under mode1 (write access) */
1023 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
1026 dest |= a1 & ~TARGET_PAGE_MASK;
/* translate the source under mode2 (read access) */
1028 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
1031 src |= a2 & ~TARGET_PAGE_MASK;
1033 /* XXX replace w/ memcpy */
1034 for (i = 0; i < l; i++) {
1035 /* XXX be more clever */
/* re-translate via recursion when either operand crosses a page */
1036 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
1037 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
1038 mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
1041 stb_phys(dest + i, ldub_phys(src + i));
1047 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1049 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1050 __func__, l, a1, a2);
1052 return mvc_asc(env, l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
1055 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1057 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1058 __func__, l, a1, a2);
1060 return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
1063 /* invalidate pte */
1064 void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
1066 uint64_t page = vaddr & TARGET_PAGE_MASK;
1069 /* XXX broadcast to other CPUs */
1071 /* XXX Linux is nice enough to give us the exact pte address.
1072 According to spec we'd have to find it out ourselves */
1073 /* XXX Linux is fine with overwriting the pte, the spec requires
1074 us to only set the invalid bit */
1075 stq_phys(pte_addr, pte | _PAGE_INVALID);
1077 /* XXX we exploit the fact that Linux passes the exact virtual
1078 address here - it's not obliged to! */
1079 tlb_flush_page(env, page);
1081 /* XXX 31-bit hack */
1082 if (page & 0x80000000) {
1083 tlb_flush_page(env, page & ~0x80000000);
1085 tlb_flush_page(env, page | 0x80000000);
1089 /* flush local tlb */
1090 void HELPER(ptlb)(CPUS390XState *env)
1095 /* store using real address */
1096 void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint32_t v1)
1098 stw_phys(get_address(env, 0, 0, addr), v1);
1101 /* load real address */
1102 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
1105 int old_exc = env->exception_index;
1106 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
1110 /* XXX incomplete - has more corner cases */
1111 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
1112 program_interrupt(env, PGM_SPECIAL_OP, 2);
1115 env->exception_index = old_exc;
1116 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
1119 if (env->exception_index == EXCP_PGM) {
1120 ret = env->int_pgm_code | 0x80000000;
1122 ret |= addr & ~TARGET_PAGE_MASK;
1124 env->exception_index = old_exc;