return 0;
}
- if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, sizeof(*reqh))) {
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
return 0;
}
reqh = (ClpReqHdr *)buffer;
return 0;
}
- if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + sizeof(*resh))) {
return 0;
}
return 0;
}
- if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
return 0;
}
}
out:
- if (s390_cpu_virt_mem_write(cpu, env->regs[r2], buffer,
+ if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
return 0;
}
return 0;
}
-int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
return 0;
}
- if (s390_cpu_virt_mem_read(cpu, gaddr, buffer, len)) {
+ if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
return 0;
}
pbdev->g_iota = 0;
}
-int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
CPUS390XState *env = &cpu->env;
uint8_t oc;
return 0;
}
- if (s390_cpu_virt_mem_read(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
+ if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
return 0;
}
return 0;
}
-int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
fib.fc |= 0x10;
}
- if (s390_cpu_virt_mem_write(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
+ if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
return 0;
}
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
-int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr);
-int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
-int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba);
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
#endif
#ifndef CONFIG_USER_ONLY
void do_restart_interrupt(CPUS390XState *env);
-static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb)
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+ uint8_t *ar)
{
hwaddr addr = 0;
uint8_t reg;
addr = env->regs[reg];
}
addr += (ipb >> 16) & 0xfff;
+ if (ar) {
+ *ar = reg;
+ }
return addr;
}
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
- bool is_write);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
#else
{
return -ENOSYS;
}
-static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf,
- int len, bool is_write)
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
+ void *hostbuf, int len, bool is_write)
{
return -ENOSYS;
}
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
uint64_t vr);
-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf, int len,
- bool is_write);
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
-#define s390_cpu_virt_mem_read(cpu, laddr, dest, len) \
- s390_cpu_virt_mem_rw(cpu, laddr, dest, len, false)
-#define s390_cpu_virt_mem_write(cpu, laddr, dest, len) \
- s390_cpu_virt_mem_rw(cpu, laddr, dest, len, true)
-#define s390_cpu_virt_mem_check_write(cpu, laddr, len) \
- s390_cpu_virt_mem_rw(cpu, laddr, NULL, len, true)
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
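(Not part of the patch.) A minimal sketch of the calling convention the helpers above now expect, assuming a hypothetical handler and the surrounding QEMU s390x headers: the access register number reported by the base/displacement decoder is handed straight to the memory helpers, and callers that never use access-register mode can pass NULL to the decoder, as the DIAG and SIGP handlers further down do.

/* Illustrative sketch only, not part of the patch: a hypothetical handler
 * showing how the decoded access register is threaded into the helpers. */
static void example_handler(S390CPU *cpu, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    uint8_t buf[16];
    uint8_t ar;
    hwaddr addr;

    /* decode_basedisp_s() now also reports the base register number,
     * which names the access register used in access-register mode. */
    addr = decode_basedisp_s(env, ipb, &ar);

    if (s390_cpu_virt_mem_read(cpu, addr, ar, buf, sizeof(buf))) {
        return; /* a program interrupt has already been injected */
    }
    /* ... consume buf ... */
}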
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
int ret = -ENODEV;
int cc;
CPUS390XState *env = &cpu->env;
+ uint8_t ar;
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
- if (s390_cpu_virt_mem_read(cpu, addr, &schib, sizeof(schib))) {
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
return;
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
int ret = -ENODEV;
int cc;
CPUS390XState *env = &cpu->env;
+ uint8_t ar;
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
}
- if (s390_cpu_virt_mem_read(cpu, addr, &orig_orb, sizeof(orb))) {
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
return;
}
copy_orb_from_guest(&orb, &orig_orb);
uint64_t addr;
int cc;
CPUS390XState *env = &cpu->env;
+ uint8_t ar;
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
cc = css_do_stcrw(&crw);
/* 0 - crw stored, 1 - zeroes stored */
- if (s390_cpu_virt_mem_write(cpu, addr, &crw, sizeof(crw)) == 0) {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
setcc(cpu, cc);
} else if (cc == 0) {
/* Write failed: requeue CRW since STCRW is a suppressing instruction */
int cc;
SCHIB schib;
CPUS390XState *env = &cpu->env;
+ uint8_t ar;
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return;
* we check whether the memory area is writeable (injecting the
* access exception if it is not) first.
*/
- if (!s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib))) {
+ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
program_interrupt(env, PGM_OPERAND, 2);
}
return;
}
}
if (cc != 3) {
- if (s390_cpu_virt_mem_write(cpu, addr, &schib, sizeof(schib)) != 0) {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+ sizeof(schib)) != 0) {
return;
}
} else {
/* Access exceptions have a higher priority than cc3 */
- if (s390_cpu_virt_mem_check_write(cpu, addr, sizeof(schib)) != 0) {
+ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
return;
}
}
IRB irb;
uint64_t addr;
int cc, irb_len;
+ uint8_t ar;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
program_interrupt(env, PGM_OPERAND, 2);
return -EIO;
}
trace_ioinst_sch_id("tsch", cssid, ssid, schid);
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return -EIO;
}
/* 0 - status pending, 1 - not status pending, 3 - not operational */
if (cc != 3) {
- if (s390_cpu_virt_mem_write(cpu, addr, &irb, irb_len) != 0) {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
return -EFAULT;
}
css_do_tsch_update_subch(sch);
} else {
irb_len = sizeof(irb) - sizeof(irb.emw);
/* Access exceptions have a higher priority than cc3 */
- if (s390_cpu_virt_mem_check_write(cpu, addr, irb_len) != 0) {
+ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
return -EFAULT;
}
}
* present CHSC sub-handlers ... if we ever need more, we should take
* care of req->len here first.
*/
- if (s390_cpu_virt_mem_read(cpu, addr, buf, sizeof(ChscReq))) {
+ if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
return;
}
req = (ChscReq *)buf;
break;
}
- if (!s390_cpu_virt_mem_write(cpu, addr + len, res, be16_to_cpu(res->len))) {
+ if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+ be16_to_cpu(res->len))) {
setcc(cpu, 0); /* Command execution complete */
}
}
IOIntCode int_code;
hwaddr len;
int ret;
+ uint8_t ar;
trace_ioinst("tpi");
- addr = decode_basedisp_s(env, ipb);
+ addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
program_interrupt(env, PGM_SPECIFICATION, 2);
return -EIO;
len = lowcore ? 8 /* two words */ : 12 /* three words */;
ret = css_do_tpi(&int_code, lowcore);
if (ret == 1) {
- s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, &int_code, len);
+ s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
}
return ret;
}
/**
* kvm_s390_mem_op:
* @addr: the logical start address in guest memory
+ * @ar: the access register number
* @hostbuf: buffer in host memory. NULL = do only checks w/o copying
* @len: length that should be transferred
* @is_write: true = write, false = read
* Use KVM ioctl to read/write from/to guest memory. An access exception
* is injected into the vCPU in case of translation errors.
*/
-int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, void *hostbuf, int len,
- bool is_write)
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write)
{
struct kvm_s390_mem_op mem_op = {
.gaddr = addr,
.op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
: KVM_S390_MEMOP_LOGICAL_READ,
.buf = (uint64_t)hostbuf,
+ .ar = ar,
};
int ret;
return rc;
}
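(Not part of the patch.) For context, a mem_op prepared as above is eventually handed to the kernel through the KVM_S390_MEM_OP vcpu ioctl. The sketch below shows the assumed shape of that step, using QEMU's kvm_vcpu_ioctl() helper and the KVM_S390_MEMOP_F_CHECK_ONLY flag for the hostbuf == NULL case; it is not the literal elided body.

/* Sketch only (assumed shape, not the elided code): a NULL hostbuf turns
 * the operation into a pure access check, and the populated ar field lets
 * the kernel perform the access-register-mode translation itself. */
static int mem_op_issue_sketch(S390CPU *cpu, struct kvm_s390_mem_op *mem_op,
                               void *hostbuf)
{
    if (!hostbuf) {
        mem_op->flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    return kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, mem_op);
}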
-static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
{
CPUS390XState *env = &cpu->env;
uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
if (disp2 & 0x80000) {
disp2 += 0xfff00000;
}
+ if (ar) {
+ *ar = base2;
+ }
return (base2 ? env->regs[base2] : 0) +
(x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
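(Not part of the patch.) Note that this decoder, like the RSY variant below, reports the base register designation through *ar, never the index register: in access-register mode the access register named by the B field qualifies the address, while the X field only contributes to the address arithmetic. A trivial restatement of that rule, for illustration:

/* Illustration only: the rule the decoders implement.  The AR that
 * qualifies the address is the one named by the base (B) field; the
 * index (X) field never selects an access register. */
static uint64_t rxy_address_sketch(CPUS390XState *env, uint8_t x2,
                                   uint8_t base2, int32_t disp2, uint8_t *ar)
{
    if (ar) {
        *ar = base2;    /* never x2 */
    }
    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + disp2;
}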
-static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
{
CPUS390XState *env = &cpu->env;
uint32_t base2 = run->s390_sieic.ipb >> 28;
if (disp2 & 0x80000) {
disp2 += 0xfff00000;
}
+ if (ar) {
+ *ar = base2;
+ }
return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
{
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint64_t fiba;
+ uint8_t ar;
cpu_synchronize_state(CPU(cpu));
- fiba = get_base_disp_rxy(cpu, run);
+ fiba = get_base_disp_rxy(cpu, run, &ar);
- return stpcifc_service_call(cpu, r1, fiba);
+ return stpcifc_service_call(cpu, r1, fiba, ar);
}
static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint8_t r3 = run->s390_sieic.ipa & 0x000f;
uint64_t gaddr;
+ uint8_t ar;
cpu_synchronize_state(CPU(cpu));
- gaddr = get_base_disp_rsy(cpu, run);
+ gaddr = get_base_disp_rsy(cpu, run, &ar);
- return pcistb_service_call(cpu, r1, r3, gaddr);
+ return pcistb_service_call(cpu, r1, r3, gaddr, ar);
}
static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
uint64_t fiba;
+ uint8_t ar;
cpu_synchronize_state(CPU(cpu));
- fiba = get_base_disp_rxy(cpu, run);
+ fiba = get_base_disp_rxy(cpu, run, &ar);
- return mpcifc_service_call(cpu, r1, fiba);
+ return mpcifc_service_call(cpu, r1, fiba, ar);
}
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
* For any diagnose call we support, bits 48-63 of the resulting
* address specify the function code; the remainder is ignored.
*/
- func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
+ func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
switch (func_code) {
case DIAG_IPL:
kvm_handle_diag_308(cpu, run);
cpu_synchronize_state(CPU(cpu));
/* get order code */
- order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
+ order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
+ & SIGP_ORDER_MASK;
status_reg = &env->regs[r1];
param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
return ret;
}
-static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr)
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
struct sysib_322 sysib;
int del;
- if (s390_cpu_virt_mem_read(cpu, addr, &sysib, sizeof(sysib))) {
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
return;
}
/* Shift the stack of Extended Names to prepare for our own data */
/* Insert UUID */
memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));
- s390_cpu_virt_mem_write(cpu, addr, &sysib, sizeof(sysib));
+ s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
}
static int handle_stsi(S390CPU *cpu)
return 0;
}
/* Only sysib 3.2.2 needs post-handling for now. */
- insert_stsi_3_2_2(cpu, run->s390_stsi.addr);
+ insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
return 0;
default:
return 0;
/**
* s390_cpu_virt_mem_rw:
* @laddr: the logical start address
+ * @ar: the access register number
* @hostbuf: buffer in host memory. NULL = do only checks w/o copying
* @len: length that should be transferred
* @is_write: true = write, false = read
* Copy from/to guest memory using logical addresses. Note that we inject a
* program interrupt in case there is an error while accessing the memory.
*/
-int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, void *hostbuf,
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int len, bool is_write)
{
int currlen, nr_pages, i;
int ret;
if (kvm_enabled()) {
- ret = kvm_s390_mem_op(cpu, laddr, hostbuf, len, is_write);
+ ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
if (ret >= 0) {
return ret;
}
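(Not part of the patch.) Taken together, the changes route the access register number along the whole path from the SIE intercept down to the kernel. A compressed sketch of the flow for one of the PCI instructions, STPCIFC under KVM, using only functions touched above:

/*
 * Flow sketch (function names as in the patch, glue elided):
 *
 *   SIE intercept (ipa/ipb)
 *     -> get_base_disp_rxy(cpu, run, &ar)              decode address and AR
 *     -> stpcifc_service_call(cpu, r1, fiba, ar)       instruction handler
 *     -> s390_cpu_virt_mem_write(cpu, fiba, ar, ...)   logical-address helper
 *     -> kvm_s390_mem_op(cpu, fiba, ar, ...)           fills mem_op.ar
 *     -> KVM_S390_MEM_OP ioctl                         kernel applies
 *                                                      access-register mode
 */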