/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        cpu_loop_exit_restore(cs, retaddr);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -(addr | TARGET_PAGE_MASK);
    }
#endif
    return len;
}

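/*
 * Worked example (an illustration, assuming 4 KiB target pages): for
 * addr = 0x1ffe and len = 16 the access would cross a page boundary,
 * and the function returns -(0x1ffe | 0xfffffffffffff000) = 2, i.e.
 * exactly the number of bytes left in the current page.
 */
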
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}

/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            uint32_t l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data_ra(env, dest, byte, ra);
            dest++;
            l--;
        }
    }
}

#ifndef CONFIG_USER_ONLY
static void fast_memmove_idx(CPUS390XState *env, uint64_t dest, uint64_t src,
                             uint32_t len, int dest_idx, int src_idx,
                             uintptr_t ra)
{
    TCGMemOpIdx oi_dest = make_memop_idx(MO_UB, dest_idx);
    TCGMemOpIdx oi_src = make_memop_idx(MO_UB, src_idx);
    uint32_t len_adj;
    void *src_p;
    void *dest_p;
    uint8_t x;

    while (len > 0) {
        src = wrap_address(env, src);
        dest = wrap_address(env, dest);
        src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, src_idx);
        dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, dest_idx);

        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            len_adj = adj_len_to_page(adj_len_to_page(len, src), dest);
            memmove(dest_p, src_p, len_adj);
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            len_adj = 1;
            x = helper_ret_ldub_mmu(env, src, oi_src, ra);
            helper_ret_stb_mmu(env, dest, x, oi_dest, ra);
        }
        src += len_adj;
        dest += len_adj;
        len -= len_adj;
    }
}

static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

static void fast_memmove_as(CPUS390XState *env, uint64_t dest, uint64_t src,
                            uint32_t len, uint8_t dest_as, uint8_t src_as,
                            uintptr_t ra)
{
    int src_idx = mmu_idx_from_as(src_as);
    int dest_idx = mmu_idx_from_as(dest_as);

    fast_memmove_idx(env, dest, src, len, dest_idx, src_idx, ra);
}
#endif

static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            uint32_t l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
            src++;
            dest++;
            l--;
        }
    }
}

/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x &= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1, ra);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x ^= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x |= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}

/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc and memmove do not behave the same when areas overlap! */
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == src + 1) {
        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
    } else if (dest < src || src + l < dest) {
        fast_memmove(env, dest, src, l + 1, ra);
    } else {
        /* slow version with byte accesses which always work */
        for (i = 0; i <= l; i++) {
            uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
            cpu_stb_data_ra(env, dest + i, x, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}

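/*
 * Illustration of the overlap rule above: with src = 0x100, dest = 0x101
 * and l = 3 (four bytes), MVC copies byte by byte, so the value at 0x100
 * is propagated to 0x101..0x104 -- the classic MVC idiom for filling a
 * buffer with one byte, which fast_memset implements directly.
 */
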
/* move inverse  */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move numerics  */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move with offset  */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}

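/*
 * Worked example for the helper above: with l = 0x21 (len_dest = 2,
 * len_src = 1), a 2-byte source 0x12 0x34 and a 3-byte destination
 * 0x56 0x78 0x9A, the source digits are shifted left by one nibble
 * over the destination, preserving only the rightmost destination
 * nibble: the result is 0x01 0x23 0x4A.
 */
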
/* move zones  */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}

static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros. Choose the former so that the function can also be
               used for TRT.  */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT.  */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

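/*
 * Example of the 24-bit case above: with a register holding
 * 0xaaaaaaaa_ffffffff and address = 0x123456, deposit64(..., 0, 24, ...)
 * only replaces the low 24 bits, leaving 0xaaaaaaaa_ff123456.
 */
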
static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0.  */
    if (env->regs[0] & 0xffffff00u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero.  */
    if (env->regs[0] & 0xffff0000u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte.  */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* ??? missing r0 handling, which includes access keys, but more
       importantly optional suppression of the exception!  */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
    return 0; /* data moved */
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    d = wrap_address(env, d);
    s = wrap_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        /* we either came here by lam or lamy, which have different lengths */
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    uint64_t len = MIN(*srclen, *destlen);
    uint32_t cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* Copy the src array */
    fast_memmove(env, *dest, *src, len, ra);
    *src += len;
    *srclen -= len;
    *dest += len;
    *destlen -= len;

    /* Pad the remaining area */
    if (wordsize == 1) {
        fast_memset(env, *dest, pad, *destlen, ra);
        *dest += *destlen;
        *destlen = 0;
    } else {
        /* If remaining length is odd, pad with odd byte first.  */
        if (*destlen & 1) {
            cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
            *dest += 1;
            *destlen -= 1;
        }
        /* The remaining length is even, pad using words.  */
        for (; *destlen; *dest += 2, *destlen -= 2) {
            cpu_stw_data_ra(env, *dest, pad, ra);
        }
    }

    return cc;
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}

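/*
 * Register layout used by the helper above (24-bit form of MVCL): R1/R2
 * hold the operand addresses, the low 24 bits of R1+1/R2+1 hold the
 * operand lengths, and bits 32-39 (PoO numbering) of R2+1 hold the pad
 * byte. E.g. if the low word of R2+1 is 0x2a000010, the source is 16
 * bytes long and is padded with 0x2a.
 */
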
/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}

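/*
 * Folding example: a running sum of 0x1_2345_6789 folds to
 * 0x2345_6789 + 0x1 = 0x2345_678a, which already fits in 32 bits. A
 * second iteration is only needed when the addition itself carries
 * out of bit 31 again.
 */
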
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}

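/*
 * Worked example for the helper above: zoned source 0xF1 0xF2 0xC3
 * (digits 1, 2, 3 with a C sign zone in the last byte) and len = 0x12
 * (len_dest = 1, len_src = 2) yields the packed bytes 0x12 0x3C -- the
 * last byte has its nibbles flipped, then the remaining digits are
 * packed two per byte from right to left.
 */
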
static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long.  */
    const int destlen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - ssize;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        if (i == 0) {
            /* Start with a positive sign */
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src >= 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}

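/*
 * Worked example, the inverse of the PACK example above: packed source
 * 0x12 0x3C with len = 0x21 (len_dest = 2, len_src = 1) unpacks to
 * 0xF1 0xF2 0xC3 -- the rightmost byte gets its nibbles flipped, every
 * other digit is padded with an 0xF zone.
 */
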
static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long.  */
    const int srclen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign.  */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}

uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}

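/*
 * Examples for the cc computed above: 0x12 0x3C is a valid packed number
 * (cc 0); 0x12 0x34 has an invalid sign nibble (cc 1); 0x1B 0x3C has an
 * invalid digit (cc 2); 0x1B 0x34 has both (cc 3).
 */
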
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            /* End of input found.  */
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->retxl = len - i;
    env->cc_op = cc;
    return array + i;
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

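/*
 * For instance, with a 256-byte function table that is all zero except
 * for table[','] = 0x40, the helper above scans the operand bytes,
 * stops at the first comma, puts its address in GR1 and 0x40 into the
 * low byte of GR2; cc is 1 (stopped early) or 2 (match on the last byte).
 */
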
/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH).  */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);
        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}

static void do_cdsg(CPUS390XState *env, uint64_t addr,
                    uint32_t r1, uint32_t r3, bool parallel)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    bool fail;

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        fail = !int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t oldh, oldl;

        check_alignment(env, addr, 16, ra);

        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
        oldl = cpu_ldq_data_ra(env, addr + 8, ra);

        oldv = int128_make128(oldl, oldh);
        fail = !int128_eq(oldv, cmpv);
        if (fail) {
            newv = oldv;
        }

        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
    }

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    do_cdsg(env, addr, r1, r3, false);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    do_cdsg(env, addr, r1, r3, true);
}

static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
    uint32_t mem_idx = cpu_mmu_index(env, false);
#endif
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic.  */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments.  */
    if (extract32(a1, 0, 4 << fc) || extract32(a2, 0, 1 << sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address.  */
#ifndef CONFIG_USER_ONLY
    probe_write(env, a2, 0, mem_idx, ra);
#endif

    /* Note that the compare-and-swap is atomic, and the store is atomic, but
       the complete operation is not.  Therefore we do not need to assert serial
       context in order to implement this.  That said, restart early if we can't
       support either operation that is supposed to be atomic.  */
    if (parallel) {
        int mask = 0;
#if !defined(CONFIG_ATOMIC64)
        mask = -8;
#elif !defined(CONFIG_ATOMIC128)
        mask = -16;
#endif
        if (((4 << fc) | (1 << sc)) & mask) {
            cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list.  */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

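    /*
     * For reference, the 32-byte parameter list addressed via GR1 holds
     * the replacement (swap) value in bytes 0-15 and the store value in
     * bytes 16-31; the two loads above therefore cover the entire store
     * value, and for sc < 3 the value to store sits in svh's high bits.
     */
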
    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC128
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh.  */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (parallel) {
#ifdef CONFIG_ATOMIC128
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    g_assert_not_reached();
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}

uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_real_ra(env, real_addr + i, 0, ra);
    }

    return 0;
}

uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = 0;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = 0;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = 0;
    return 3;
}

/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
        cpu_stb_secondary_ra(env, a1 + i, x, ra);
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
        cpu_stb_primary_ra(env, a1 + i, x, ra);
    }

    return cc;
}

void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31bit mode but table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_real_ra(env, raddr, ra);
            if (!(entry & REGION_ENTRY_INV)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_INV;
                cpu_stq_real_ra(env, raddr, entry, ra);
            }
        }
    }

    /* We simply flush the complete tlb, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_real_ra(env, pte_addr, ra);
    pte |= PAGE_INVALID;
    cpu_stq_real_ra(env, pte_addr, pte, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush_all_cpus_synced(CPU(cpu));
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
    }

    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif

/* load pair from quadword */
static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
        hi = int128_gethi(v);
        lo = int128_getlo(v);
#endif
    } else {
        check_alignment(env, addr, 16, ra);

        hi = cpu_ldq_data_ra(env, addr + 0, ra);
        lo = cpu_ldq_data_ra(env, addr + 8, ra);
    }

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    return do_lpq(env, addr, false);
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    return do_lpq(env, addr, true);
}

/* store pair to quadword */
static void do_stpq(CPUS390XState *env, uint64_t addr,
                    uint64_t low, uint64_t high, bool parallel)
{
    uintptr_t ra = GETPC();

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);

        Int128 v = int128_make128(low, high);
        helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
#endif
    } else {
        check_alignment(env, addr, 16, ra);

        cpu_stq_data_ra(env, addr + 0, high, ra);
        cpu_stq_data_ra(env, addr + 8, low, ra);
    }
}

void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    do_stpq(env, addr, low, high, false);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    do_stpq(env, addr, low, high, true);
}

/* Execute instruction.  This instruction executes an insn modified with
   the contents of r1.  It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63].  */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction.  */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB.  */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}

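/*
 * Example: EXECUTE of an MVC whose length byte is 0, with R1 = 0x2a,
 * ORs 0x2a into the second instruction byte (the SS-format length
 * field), so the MVC runs with an effective length of 0x2a. The
 * 0xd0-0xdf fast path above handles exactly this pattern without
 * leaving the current TB.
 */
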
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

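    /*
     * Each 16-bit OAC decoded above is laid out as: bits 0-3 access key,
     * bits 8-9 address-space control, bit 14 key validity (K), bit 15
     * AS validity (A). For example, an OAC of 0x9081 selects the
     * secondary address space (AS = 2, A = 1), while the key field of 9
     * is ignored because K = 0.
     */
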
    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
    }

    len = wrap_length(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
    }

    /* FIXME: a) LAP
     *        b) Access using correct keys
     *        c) AR-mode
     */
#ifdef CONFIG_USER_ONLY
    /* psw keys are never valid in user mode, we will never reach this */
    g_assert_not_reached();
#else
    fast_memmove_as(env, dest, src, len, dest_as, src_as, ra);
#endif

    return cc;
}

/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        c = s0 & 0x1f;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        c = s0 & 0x0f;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller, and the UTF-16 surrogates.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        c = s0 & 0x07;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x010000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}

static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}

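/*
 * Surrogate example for the encoder above: c = U+10437 encodes as
 * d0 = 0xd801, d1 = 0xdc37. The deposit32 implements the "minus 1"
 * bias on the high bits of the code point (0x10437 >> 16 = 1, minus
 * 1 = 0) that UTF-16 surrogate pairs require.
 */
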
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int i, cc;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}

uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}