#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
+#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
+#include "tcg/tcg.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
+#include "hw/boards.h"
#endif
/*****************************************************************************/
    return dest > src && dest <= src + len - 1;
}
-/* Reduce the length so that addr + len doesn't cross a page boundary. */
-static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
-{
-#ifndef CONFIG_USER_ONLY
- if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
- return -(addr | TARGET_PAGE_MASK);
- }
-#endif
- return len;
-}
-
/* Trigger a SPECIFICATION exception if an address or a length is not
naturally aligned. */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
int wordsize, uintptr_t ra)
{
if (v % wordsize) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
}
desta->mmu_idx, ra);
}
-#ifndef CONFIG_USER_ONLY
-static void fast_memmove_idx(CPUS390XState *env, uint64_t dest, uint64_t src,
- uint32_t len, int dest_idx, int src_idx,
- uintptr_t ra)
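+/*
+ * Read a single byte of a prepared access: use the cached host address when
+ * the page is directly addressable, otherwise take the slow MMU path for
+ * this byte and retry the host-address lookup afterwards.
+ */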
+static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
+ int offset, int mmu_idx, uintptr_t ra)
{
- TCGMemOpIdx oi_dest = make_memop_idx(MO_UB, dest_idx);
- TCGMemOpIdx oi_src = make_memop_idx(MO_UB, src_idx);
- uint32_t len_adj;
- void *src_p;
- void *dest_p;
- uint8_t x;
-
- while (len > 0) {
- src = wrap_address(env, src);
- dest = wrap_address(env, dest);
- src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, src_idx);
- dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, dest_idx);
-
- if (src_p && dest_p) {
- /* Access to both whole pages granted. */
- len_adj = adj_len_to_page(adj_len_to_page(len, src), dest);
- memmove(dest_p, src_p, len_adj);
- } else {
- /* We failed to get access to one or both whole pages. The next
- read or write access will likely fill the QEMU TLB for the
- next iteration. */
- len_adj = 1;
- x = helper_ret_ldub_mmu(env, src, oi_src, ra);
- helper_ret_stb_mmu(env, dest, x, oi_dest, ra);
+#ifdef CONFIG_USER_ONLY
+ return ldub_p(*haddr + offset);
+#else
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+ uint8_t byte;
+
+ if (likely(*haddr)) {
+ return ldub_p(*haddr + offset);
+ }
+ /*
+ * Do a single access and test if we can then get access to the
+ * page. This is especially relevant to speed up TLB_NOTDIRTY.
+ */
+ byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra);
+ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
+ return byte;
+#endif
+}
+
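+/* Dispatch a read of one byte to the first or second page of the access */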
+static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
+ int offset, uintptr_t ra)
+{
+ if (offset < access->size1) {
+ return do_access_get_byte(env, access->vaddr1, &access->haddr1,
+ offset, access->mmu_idx, ra);
+ }
+ return do_access_get_byte(env, access->vaddr2, &access->haddr2,
+ offset - access->size1, access->mmu_idx, ra);
+}
+
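+/*
+ * Store a single byte, mirroring do_access_get_byte(): write via the cached
+ * host address when possible, otherwise through the slow MMU path.
+ */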
+static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
+ int offset, uint8_t byte, int mmu_idx,
+ uintptr_t ra)
+{
+#ifdef CONFIG_USER_ONLY
+ stb_p(*haddr + offset, byte);
+#else
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+ if (likely(*haddr)) {
+ stb_p(*haddr + offset, byte);
+ return;
+ }
+ /*
+ * Do a single access and test if we can then get access to the
+ * page. This is especially relevant to speed up TLB_NOTDIRTY.
+ */
+ helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra);
+ *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
+#endif
+}
+
+static void access_set_byte(CPUS390XState *env, S390Access *access,
+ int offset, uint8_t byte, uintptr_t ra)
+{
+ if (offset < access->size1) {
+ do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
+ access->mmu_idx, ra);
+ } else {
+ do_access_set_byte(env, access->vaddr2, &access->haddr2,
+ offset - access->size1, byte, access->mmu_idx, ra);
+ }
+}
+
+/*
+ * Move data with the same semantics as memmove() when the ranges don't
+ * overlap or when src > dest. Undefined behavior on destructive overlaps.
+ */
+static void access_memmove(CPUS390XState *env, S390Access *desta,
+ S390Access *srca, uintptr_t ra)
+{
+ int diff;
+
+ g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);
+
+    /* Fall back to slow access in case we don't have access to all host pages */
+ if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
+ !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
+ int i;
+
+ for (i = 0; i < desta->size1 + desta->size2; i++) {
+ uint8_t byte = access_get_byte(env, srca, i, ra);
+
+ access_set_byte(env, desta, i, byte, ra);
+ }
+ return;
+ }
+
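+    /*
+     * The source and destination ranges may split at different offsets
+     * across the page boundary, so align the memmove() chunks on whichever
+     * split comes first.
+     */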
+ if (srca->size1 == desta->size1) {
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ if (unlikely(srca->size2)) {
+ memmove(desta->haddr2, srca->haddr2, srca->size2);
+ }
+ } else if (srca->size1 < desta->size1) {
+ diff = desta->size1 - srca->size1;
+ memmove(desta->haddr1, srca->haddr1, srca->size1);
+ memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
+ if (likely(desta->size2)) {
+ memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
+ }
+ } else {
+ diff = srca->size1 - desta->size1;
+ memmove(desta->haddr1, srca->haddr1, desta->size1);
+ memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
+ if (likely(srca->size2)) {
+ memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
}
- src += len_adj;
- dest += len_adj;
- len -= len_adj;
}
}
-static void fast_memmove_as(CPUS390XState *env, uint64_t dest, uint64_t src,
- uint32_t len, uint8_t dest_as, uint8_t src_as,
- uintptr_t ra)
-{
- int src_idx = mmu_idx_from_as(src_as);
- int dest_idx = mmu_idx_from_as(dest_as);
-
- fast_memmove_idx(env, dest, src, len, dest_idx, src_idx, ra);
-}
-#endif
-
-static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
- uint32_t l, uintptr_t ra)
-{
- int mmu_idx = cpu_mmu_index(env, false);
-
- while (l > 0) {
- void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
- void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
- if (src_p && dest_p) {
- /* Access to both whole pages granted. */
- uint32_t l_adj = adj_len_to_page(l, src);
- l_adj = adj_len_to_page(l_adj, dest);
- memmove(dest_p, src_p, l_adj);
- src += l_adj;
- dest += l_adj;
- l -= l_adj;
- } else {
- /* We failed to get access to one or both whole pages. The next
- read or write access will likely fill the QEMU TLB for the
- next iteration. */
- cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
- src++;
- dest++;
- l--;
- }
- }
-}
-
/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src, uintptr_t ra)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
uint32_t i;
uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
- for (i = 0; i <= l; i++) {
- uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
- x &= cpu_ldub_data_ra(env, dest + i, ra);
+ /* NC always processes one more byte than specified - maximum is 256 */
+ l++;
+
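+    /*
+     * Prepare all accesses upfront, so that access exceptions are raised
+     * before the destination gets modified (fault-safe handling).
+     */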
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) &
+ access_get_byte(env, &srca2, i, ra);
+
c |= x;
- cpu_stb_data_ra(env, dest + i, x, ra);
+ access_set_byte(env, &desta, i, x, ra);
}
return c != 0;
}
uint64_t src, uintptr_t ra)
{
const int mmu_idx = cpu_mmu_index(env, false);
- S390Access desta;
+ S390Access srca1, srca2, desta;
uint32_t i;
uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
- desta = access_prepare(env, dest, l + 1, MMU_DATA_STORE, mmu_idx, ra);
+ /* XC always processes one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
/* xor with itself is the same as memset(0) */
if (src == dest) {
        access_memset(env, &desta, 0, ra);
        return 0;
}
- for (i = 0; i <= l; i++) {
- uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
- x ^= cpu_ldub_data_ra(env, dest + i, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
+ access_get_byte(env, &srca2, i, ra);
+
c |= x;
- cpu_stb_data_ra(env, dest + i, x, ra);
+ access_set_byte(env, &desta, i, x, ra);
}
return c != 0;
}
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src, uintptr_t ra)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
uint32_t i;
uint8_t c = 0;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
__func__, l, dest, src);
- for (i = 0; i <= l; i++) {
- uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
- x |= cpu_ldub_data_ra(env, dest + i, ra);
+ /* OC always processes one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca1, i, ra) |
+ access_get_byte(env, &srca2, i, ra);
+
c |= x;
- cpu_stb_data_ra(env, dest + i, x, ra);
+ access_set_byte(env, &desta, i, x, ra);
}
return c != 0;
}
uint64_t src, uintptr_t ra)
{
const int mmu_idx = cpu_mmu_index(env, false);
- S390Access desta;
+ S390Access srca, desta;
uint32_t i;
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
/* MVC always copies one more byte than specified - maximum is 256 */
l++;
+ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    /*
     * "When the operands overlap, the result is obtained as if the operands
     * were processed one byte at a time". Only without destructive overlap
     * can we behave like memmove().
     */
if (dest == src + 1) {
- access_memset(env, &desta, cpu_ldub_data_ra(env, src, ra), ra);
+ access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
} else if (!is_destructive_overlap(env, dest, src, l)) {
- fast_memmove(env, dest, src, l, ra);
+ access_memmove(env, &desta, &srca, ra);
} else {
for (i = 0; i < l; i++) {
- uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
- cpu_stb_data_ra(env, dest + i, x, ra);
+ uint8_t byte = access_get_byte(env, &srca, i, ra);
+
+ access_set_byte(env, &desta, i, byte, ra);
}
}
/* move inverse */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca, desta;
uintptr_t ra = GETPC();
int i;
- for (i = 0; i <= l; i++) {
- uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
- cpu_stb_data_ra(env, dest + i, v, ra);
+ /* MVCIN always copies one more byte than specified - maximum is 256 */
+ l++;
+
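+    /*
+     * The source operand address designates the rightmost byte of the
+     * field; convert it to the leftmost byte for an ordinary ascending
+     * access.
+     */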
+ src = wrap_address(env, src - l + 1);
+ srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);
+
+ access_set_byte(env, &desta, i, x, ra);
}
}
/* move numerics */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
uintptr_t ra = GETPC();
int i;
- for (i = 0; i <= l; i++) {
- uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
- v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
- cpu_stb_data_ra(env, dest + i, v, ra);
+ /* MVN always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
+ (access_get_byte(env, &srca2, i, ra) & 0xf0);
+
+ access_set_byte(env, &desta, i, x, ra);
}
}
/* move with offset */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ /* MVO always processes one more byte than specified - maximum is 16 */
+ const int len_dest = (l >> 4) + 1;
+ const int len_src = (l & 0xf) + 1;
uintptr_t ra = GETPC();
- int len_dest = l >> 4;
- int len_src = l & 0xf;
uint8_t byte_dest, byte_src;
- int i;
+ S390Access srca, desta;
+ int i, j;
- src += len_src;
- dest += len_dest;
+ srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);
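+    /*
+     * Example: a 2-byte destination 0x0abc and a 1-byte source 0x12 yield
+     * 0x012c - the source digits are shifted one digit to the left and the
+     * rightmost destination digit (0xc) is preserved.
+     */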
/* Handle rightmost byte */
- byte_src = cpu_ldub_data_ra(env, src, ra);
- byte_dest = cpu_ldub_data_ra(env, dest, ra);
+ byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
+ byte_src = access_get_byte(env, &srca, len_src - 1, ra);
byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
- cpu_stb_data_ra(env, dest, byte_dest, ra);
+ access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);
/* Process remaining bytes from right to left */
- for (i = 1; i <= len_dest; i++) {
+ for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) {
byte_dest = byte_src >> 4;
- if (len_src - i >= 0) {
- byte_src = cpu_ldub_data_ra(env, src - i, ra);
+ if (j >= 0) {
+ byte_src = access_get_byte(env, &srca, j, ra);
} else {
byte_src = 0;
}
byte_dest |= byte_src << 4;
- cpu_stb_data_ra(env, dest - i, byte_dest, ra);
+ access_set_byte(env, &desta, i, byte_dest, ra);
}
}
/* move zones */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
+ S390Access srca1, srca2, desta;
uintptr_t ra = GETPC();
int i;
- for (i = 0; i <= l; i++) {
- uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
- b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
- cpu_stb_data_ra(env, dest + i, b, ra);
+ /* MVZ always copies one more byte than specified - maximum is 256 */
+ l++;
+
+ srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
+ srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < l; i++) {
+ const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
+ (access_get_byte(env, &srca2, i, ra) & 0x0f);
+
+ access_set_byte(env, &desta, i, x, ra);
}
}
/* Bits 32-55 must contain all 0. */
if (env->regs[0] & 0xffffff00u) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
str = get_address(env, r2);
/* Bits 32-47 of R0 must be zero. */
if (env->regs[0] & 0xffff0000u) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
str = get_address(env, r2);
/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
const bool f = extract64(r0, 11, 1);
const bool s = extract64(r0, 10, 1);
+ uintptr_t ra = GETPC();
+ S390Access srca, desta;
if ((f && s) || extract64(r0, 12, 4)) {
- s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
}
r1 = wrap_address(env, r1 & TARGET_PAGE_MASK);
 * - CC-option with suppression of page-translation exceptions
* - Store r1/r2 register identifiers at real location 162
*/
- fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
+ srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
+ ra);
+ desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
+ ra);
+ access_memmove(env, &desta, &srca, ra);
return 0; /* data moved */
}
/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
+ const int mmu_idx = cpu_mmu_index(env, false);
const uint64_t d = get_address(env, r1);
const uint64_t s = get_address(env, r2);
const uint8_t c = env->regs[0];
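+    /* -(a | TARGET_PAGE_MASK) is the number of bytes to the next page boundary */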
+ const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
+ S390Access srca, desta;
uintptr_t ra = GETPC();
- uint32_t len;
+ int i;
if (env->regs[0] & 0xffffff00ull) {
- s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
- /* Lest we fail to service interrupts in a timely manner, limit the
- amount of work we're willing to do. For now, let's cap at 8k. */
- for (len = 0; len < 0x2000; ++len) {
- uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
- cpu_stb_data_ra(env, d + len, v, ra);
+ /*
+ * Our access should not exceed single pages, as we must not report access
+ * exceptions exceeding the actually copied range (which we don't know at
+ * this point). We might over-indicate watchpoints within the pages
+ * (if we ever care, we have to limit processing to a single byte).
+ */
+ srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
+ for (i = 0; i < len; i++) {
+ const uint8_t v = access_get_byte(env, &srca, i, ra);
+
+ access_set_byte(env, &desta, i, v, ra);
if (v == c) {
- set_address_zero(env, r1, d + len);
+ set_address_zero(env, r1, d + i);
return 1;
}
}
int i;
if (a2 & 0x3) {
- /* we either came here by lam or lamy, which have different lengths */
- s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
int i;
if (a2 & 0x3) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
{
const int mmu_idx = cpu_mmu_index(env, false);
int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
- S390Access desta;
+ S390Access srca, desta;
int i, cc;
if (*destlen == *srclen) {
len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
*destlen -= len;
*srclen -= len;
- fast_memmove(env, *dest, *src, len, ra);
+ srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
+ desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+ access_memmove(env, &desta, &srca, ra);
*src = wrap_address(env, *src + len);
*dest = wrap_address(env, *dest + len);
} else if (wordsize == 1) {
access_memset(env, &desta, pad, ra);
*dest = wrap_address(env, *dest + len);
} else {
+ desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
+
/* The remaining length selects the padding byte. */
for (i = 0; i < len; (*destlen)--, i++) {
if (*destlen & 1) {
- cpu_stb_data_ra(env, *dest, pad, ra);
+ access_set_byte(env, &desta, i, pad, ra);
} else {
- cpu_stb_data_ra(env, *dest, pad >> 8, ra);
+ access_set_byte(env, &desta, i, pad >> 8, ra);
}
- *dest = wrap_address(env, *dest + 1);
}
+ *dest = wrap_address(env, *dest + len);
}
return *destlen ? 3 : cc;
uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
uint64_t src = get_address(env, r2);
uint8_t pad = env->regs[r2 + 1] >> 24;
+ CPUState *cs = env_cpu(env);
+ S390Access srca, desta;
uint32_t cc, cur_len;
- S390Access desta;
if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
cc = 3;
} else {
cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);
- fast_memmove(env, dest, src, cur_len, ra);
+ srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
+ ra);
+ desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
+ ra);
+ access_memmove(env, &desta, &srca, ra);
src = wrap_address(env, src + cur_len);
srclen -= cur_len;
env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
set_address_zero(env, r1, dest);
- /* TODO: Deliver interrupts. */
+ /*
+ * MVCL is interruptible. Return to the main loop if requested after
+ * writing back all state to registers. If no interrupt will get
+ * injected, we'll end up back in this handler and continue processing
+ * the remaining parts.
+ */
+ if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
+ cpu_loop_exit_restore(cs, ra);
+ }
}
return cc;
}
if (parallel) {
#ifdef CONFIG_USER_ONLY
uint32_t *haddr = g2h(a1);
- ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
+ ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(a1);
- ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
+ ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
# else
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
return cc;
spec_exception:
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
- g_assert_not_reached();
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
uint32_t i;
if (src & 0x7) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
uint32_t i;
if (src & 0x3) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
uint32_t i;
if (dest & 0x7) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
uint32_t i;
if (dest & 0x3) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
for (i = r1;; i = (i + 1) % 16) {
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
- cpu_stq_real_ra(env, real_addr + i, 0, ra);
+ cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
}
return 0;
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
uint64_t addr = wrap_address(env, r2);
uint8_t key;
- if (addr > ram_size) {
+ if (addr > ms->ram_size) {
return 0;
}
/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
uint64_t addr = wrap_address(env, r2);
uint8_t key;
- if (addr > ram_size) {
+ if (addr > ms->ram_size) {
return;
}
/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
static S390SKeysState *ss;
static S390SKeysClass *skeyclass;
uint8_t re, key;
- if (r2 > ram_size) {
+ if (r2 > ms->ram_size) {
return 0;
}
uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+ S390Access srca, desta;
uintptr_t ra = GETPC();
- int cc = 0, i;
+ int cc = 0;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
__func__, l, a1, a2);
if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
psw_as == AS_HOME || psw_as == AS_ACCREG) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
l = wrap_length32(env, l);
return cc;
}
- /* XXX replace w/ memcpy */
- for (i = 0; i < l; i++) {
- uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
- cpu_stb_secondary_ra(env, a1 + i, x, ra);
- }
-
+ /* TODO: Access key handling */
+ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_PRIMARY_IDX, ra);
+ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_SECONDARY_IDX, ra);
+ access_memmove(env, &desta, &srca, ra);
return cc;
}
uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
+ S390Access srca, desta;
uintptr_t ra = GETPC();
- int cc = 0, i;
+ int cc = 0;
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
__func__, l, a1, a2);
if (!(env->psw.mask & PSW_MASK_DAT) || !(env->cregs[0] & CR0_SECONDARY) ||
psw_as == AS_HOME || psw_as == AS_ACCREG) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, ILEN_AUTO, ra);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
l = wrap_length32(env, l);
return cc;
}
- /* XXX replace w/ memcpy */
- for (i = 0; i < l; i++) {
- uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
- cpu_stb_primary_ra(env, a1 + i, x, ra);
- }
-
+ /* TODO: Access key handling */
+ srca = access_prepare(env, a2, l, MMU_DATA_LOAD, MMU_SECONDARY_IDX, ra);
+ desta = access_prepare(env, a1, l, MMU_DATA_STORE, MMU_PRIMARY_IDX, ra);
+ access_memmove(env, &desta, &srca, ra);
return cc;
}
uint16_t entries, i, index = 0;
if (r2 & 0xff000) {
- s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}
if (!(r2 & 0x800)) {
for (i = 0; i < entries; i++) {
/* addresses are not wrapped in 24/31bit mode but table index is */
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
- entry = cpu_ldq_real_ra(env, raddr, ra);
- if (!(entry & REGION_ENTRY_INV)) {
+ entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
+ if (!(entry & REGION_ENTRY_I)) {
/* we are allowed to not store if already invalid */
- entry |= REGION_ENTRY_INV;
- cpu_stq_real_ra(env, raddr, entry, ra);
+ entry |= REGION_ENTRY_I;
+ cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
}
}
}
/* Compute the page table entry address */
pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
- pte_addr += (vaddr & VADDR_PX) >> 9;
+ pte_addr += VADDR_PAGE_TX(vaddr) * 8;
/* Mark the page table entry as invalid */
- pte = cpu_ldq_real_ra(env, pte_addr, ra);
- pte |= PAGE_INVALID;
- cpu_stq_real_ra(env, pte_addr, pte, ra);
+ pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
+ pte |= PAGE_ENTRY_I;
+ cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
if (m4 & 1) {
- if (vaddr & ~VADDR_PX) {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page(cs, page);
/* XXX 31-bit hack */
tlb_flush_page(cs, page ^ 0x80000000);
tlb_flush(cs);
}
} else {
- if (vaddr & ~VADDR_PX) {
+ if (vaddr & ~VADDR_PAGE_TX_MASK) {
tlb_flush_page_all_cpus_synced(cs, page);
/* XXX 31-bit hack */
tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
tlb_flush_all_cpus_synced(env_cpu(env));
}
-/* load using real address */
-uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
-{
- return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
-}
-
-uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
-{
- return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
-}
-
-/* store using real address */
-void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
-{
- cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());
-
- if ((env->psw.mask & PSW_MASK_PER) &&
- (env->cregs[9] & PER_CR9_EVENT_STORE) &&
- (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
- /* PSW is saved just before calling the helper. */
- env->per_address = env->psw.addr;
- env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
- }
-}
-
-void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
-{
- cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());
-
- if ((env->psw.mask & PSW_MASK_PER) &&
- (env->cregs[9] & PER_CR9_EVENT_STORE) &&
- (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
- /* PSW is saved just before calling the helper. */
- env->per_address = env->psw.addr;
- env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
- }
-}
-
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
- CPUState *cs = env_cpu(env);
- uint32_t cc = 0;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
- uint64_t ret;
- int old_exc, flags;
+ uint64_t ret, tec;
+ int flags, exc, cc;
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
}
- old_exc = cs->exception_index;
- if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
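+    /* mmu_translate() now returns the program-interruption code (0 on success) */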
+ exc = mmu_translate(env, addr, 0, asc, &ret, &flags, &tec);
+ if (exc) {
cc = 3;
- }
- if (cs->exception_index == EXCP_PGM) {
- ret = env->int_pgm_code | 0x80000000;
+ ret = exc | 0x80000000;
} else {
+ cc = 0;
ret |= addr & ~TARGET_PAGE_MASK;
}
- cs->exception_index = old_exc;
env->cc_op = cc;
return ret;
uint32_t d1 = extract64(insn, 32, 12);
uint32_t b2 = extract64(insn, 28, 4);
uint32_t d2 = extract64(insn, 16, 12);
- uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
- uint64_t a2 = wrap_address(env, env->regs[b2] + d2);
+ uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
+ uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);
env->cc_op = helper(env, l, a1, a2, 0);
env->psw.addr += ilen;
__func__, dest, src, len);
if (!(env->psw.mask & PSW_MASK_DAT)) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
/* OAC (operand access control) for the first operand -> dest */
}
if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
if (!(env->cregs[0] & CR0_SECONDARY) &&
(dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
- s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
}
if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
- s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
}
len = wrap_length32(env, len);
(env->psw.mask & PSW_MASK_PSTATE)) {
qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
__func__);
- s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
+ tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
}
- /* FIXME: a) LAP
- * b) Access using correct keys
- * c) AR-mode
- */
-#ifdef CONFIG_USER_ONLY
- /* psw keys are never valid in user mode, we will never reach this */
- g_assert_not_reached();
-#else
- fast_memmove_as(env, dest, src, len, dest_as, src_as, ra);
-#endif
+ /* FIXME: Access using correct keys and AR-mode */
+ if (len) {
+ S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
+ mmu_idx_from_as(src_as), ra);
+ S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
+ mmu_idx_from_as(dest_as), ra);
+
+ access_memmove(env, &desta, &srca, ra);
+ }
return cc;
}