#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/qdev.h"
+#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "exec/address-spaces.h"
#include "cpu.h"
uint32_t id;
uint8_t type;
uint8_t isc;
- QTAILQ_ENTRY(IoAdapter) sibling;
} IoAdapter;
typedef struct ChannelSubSys {
uint64_t chnmon_area;
CssImage *css[MAX_CSSID + 1];
uint8_t default_cssid;
- QTAILQ_HEAD(, IoAdapter) io_adapters;
+ IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
QTAILQ_HEAD(, IndAddr) indicator_addresses;
} ChannelSubSys;
.do_crw_mchk = true,
.crws_lost = false,
.chnmon_active = false,
- .io_adapters = QTAILQ_HEAD_INITIALIZER(channel_subsys.io_adapters),
.indicator_addresses =
QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
};
int css_create_css_image(uint8_t cssid, bool default_image)
{
trace_css_new_image(cssid, default_image ? "(default)" : "");
- if (cssid > MAX_CSSID) {
+ /* 255 is reserved */
+ if (cssid == 255) {
return -EINVAL;
}
if (channel_subsys.css[cssid]) {
return 0;
}
-int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap,
- bool maskable, uint32_t *id)
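+/*
+ * Return the id of the adapter registered for @type/@isc, or (uint32_t)-1
+ * if no adapter has been registered for that combination.
+ */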
+uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
{
+ if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
+ !channel_subsys.io_adapters[type][isc]) {
+ return -1;
+ }
+
+ return channel_subsys.io_adapters[type][isc]->id;
+}
+
+/**
+ * css_register_io_adapters: Register I/O adapters per ISC during init
+ *
+ * @type: type of the I/O adapter to register for.
+ * @swap: an indication if byte swap is needed.
+ * @maskable: an indication if the adapter is subject to the mask operation.
+ * @errp: location to store error information.
+ */
+void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
+ Error **errp)
+{
+ uint32_t id;
+ int ret, isc;
IoAdapter *adapter;
- bool found = false;
- int ret;
S390FLICState *fs = s390_get_flic();
S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
- *id = 0;
- QTAILQ_FOREACH(adapter, &channel_subsys.io_adapters, sibling) {
- if ((adapter->type == type) && (adapter->isc == isc)) {
- *id = adapter->id;
- found = true;
- ret = 0;
+ /*
+ * Disallow multiple registrations for the same device type.
+ * Report an error if registering for an already registered type.
+ */
+    if (channel_subsys.io_adapters[type][0]) {
+        error_setg(errp, "Adapters for type %d already registered", type);
+        return;
+    }
+
+ for (isc = 0; isc <= MAX_ISC; isc++) {
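+        /* The adapter id: type in the upper bits, isc in the low three. */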
+ id = (type << 3) | isc;
+ ret = fsc->register_io_adapter(fs, id, isc, swap, maskable);
+ if (ret == 0) {
+ adapter = g_new0(IoAdapter, 1);
+ adapter->id = id;
+ adapter->isc = isc;
+ adapter->type = type;
+ channel_subsys.io_adapters[type][isc] = adapter;
+ } else {
+ error_setg_errno(errp, -ret, "Unexpected error %d when "
+ "registering adapter %d", ret, id);
break;
}
- if (adapter->id >= *id) {
- *id = adapter->id + 1;
- }
- }
- if (found) {
- goto out;
}
- adapter = g_new0(IoAdapter, 1);
- ret = fsc->register_io_adapter(fs, *id, isc, swap, maskable);
- if (ret == 0) {
- adapter->id = *id;
- adapter->isc = isc;
- adapter->type = type;
- QTAILQ_INSERT_TAIL(&channel_subsys.io_adapters, adapter, sibling);
- } else {
- g_free(adapter);
- fprintf(stderr, "Unexpected error %d when registering adapter %d\n",
- ret, *id);
+
+    /*
+     * No need to unregister the adapters on the kvm side: kvm will clean
+     * up when the machine goes away.
+     */
+ if (ret) {
+ for (isc--; isc >= 0; isc--) {
+ g_free(channel_subsys.io_adapters[type][isc]);
+ channel_subsys.io_adapters[type][isc] = NULL;
+ }
}
-out:
- return ret;
}
static void css_clear_io_interrupt(uint16_t subchannel_id,
return css_do_build_subchannel_id(sch->cssid, sch->ssid);
}
-static void css_inject_io_interrupt(SubchDev *sch)
+void css_inject_io_interrupt(SubchDev *sch)
{
uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
ret.cda = be32_to_cpu(tmp1.cda);
} else {
cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
- ret.cmd_code = tmp0.cmd_code;
- ret.flags = tmp0.flags;
- ret.count = be16_to_cpu(tmp0.count);
- ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
- if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) {
- ret.cmd_code &= 0x0f;
+ if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
+ ret.cmd_code = CCW_CMD_TIC;
+ ret.flags = 0;
+ ret.count = 0;
+ } else {
+ ret.cmd_code = tmp0.cmd_code;
+ ret.flags = tmp0.flags;
+ ret.count = be16_to_cpu(tmp0.count);
}
+ ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
}
return ret;
}
return ret;
}
-static void sch_handle_start_func(SubchDev *sch, ORB *orb)
+static void sch_handle_start_func_virtual(SubchDev *sch, ORB *orb)
{
PMCW *p = &sch->curr_status.pmcw;
path = 0x80;
if (!(s->ctrl & SCSW_ACTL_SUSP)) {
+ /* Start Function triggered via ssch, i.e. we have an ORB */
s->cstat = 0;
s->dstat = 0;
/* Look at the orb and try to execute the channel program. */
return;
}
sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
+ s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
sch->ccw_no_data_cnt = 0;
suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
} else {
+        /*
+         * Start Function resumed via rsch, i.e. we don't have an ORB.
+         */
s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
/* The channel program had been suspended before. */
suspend_allowed = true;
s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
s->cpa = sch->channel_prog + 8;
break;
+ case -EIO:
+        /* I/O error; the resulting status depends on the specific device */
+ break;
case -ENOSYS:
/* unsupported command, generate unit check (command reject) */
s->ctrl &= ~SCSW_ACTL_START_PEND;
}
+static int sch_handle_start_func_passthrough(SubchDev *sch, ORB *orb)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int ret;
+
+ if (!(s->ctrl & SCSW_ACTL_SUSP)) {
+ assert(orb != NULL);
+ p->intparm = orb->intparm;
+ }
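+    /*
+     * Note: the ORB is dereferenced unconditionally below; a start
+     * function resumed via rsch (which has no ORB) is not handled yet.
+     */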
+
+ /*
+ * Only support prefetch enable mode.
+ * Only support 64bit addressing idal.
+ */
+ if (!(orb->ctrl0 & ORB_CTRL0_MASK_PFCH) ||
+ !(orb->ctrl0 & ORB_CTRL0_MASK_C64)) {
+ return -EINVAL;
+ }
+
+    ret = s390_ccw_cmd_request(orb, s, sch->driver_data);
+    /* Currently we don't update the control block and just return the cc. */
+    switch (ret) {
+    case 0:
+    case -EBUSY:
+    case -ENODEV:
+        break;
+    case -EACCES:
+        /* Let's reflect an inaccessible host device by cc 3. */
+        ret = -ENODEV;
+        break;
+    default:
+        /*
+         * All other return codes will trigger a program check,
+         * or set cc to 1.
+         */
+        break;
+    }
+
+ return ret;
+}
+
/*
* On real machines, this would run asynchronously to the main vcpus.
* We might want to make some parts of the ssch handling (interpreting
* read/writes) asynchronous later on if we start supporting more than
* our current very simple devices.
*/
-static void do_subchannel_work(SubchDev *sch, ORB *orb)
+int do_subchannel_work_virtual(SubchDev *sch, ORB *orb)
{
SCSW *s = &sch->curr_status.scsw;
} else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
sch_handle_halt_func(sch);
} else if (s->ctrl & SCSW_FCTL_START_FUNC) {
- sch_handle_start_func(sch, orb);
+ /* Triggered by both ssch and rsch. */
+ sch_handle_start_func_virtual(sch, orb);
} else {
/* Cannot happen. */
- return;
+ return 0;
}
css_inject_io_interrupt(sch);
+ return 0;
+}
+
+int do_subchannel_work_passthrough(SubchDev *sch, ORB *orb)
+{
+ int ret;
+ SCSW *s = &sch->curr_status.scsw;
+
+ if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
+ /* TODO: Clear handling */
+ sch_handle_clear_func(sch);
+ ret = 0;
+ } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
+ /* TODO: Halt handling */
+ sch_handle_halt_func(sch);
+ ret = 0;
+ } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
+ ret = sch_handle_start_func_passthrough(sch, orb);
+ } else {
+ /* Cannot happen. */
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
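+/* Dispatch to the handler (virtual or passthrough) set on the subchannel. */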
+static int do_subchannel_work(SubchDev *sch, ORB *orb)
+{
+ if (sch->do_subchannel_work) {
+ return sch->do_subchannel_work(sch, orb);
+ } else {
+ return -EINVAL;
+ }
}
static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
dest->chars = cpu_to_be32(src->chars);
}
-static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
+void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
{
dest->flags = cpu_to_be16(src->flags);
dest->ctrl = cpu_to_be16(src->ctrl);
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
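+    /* The subchannel must have a valid device number and be enabled. */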
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
s->flags &= ~SCSW_FLAGS_MASK_PNO;
- do_subchannel_work(sch, orb);
- ret = 0;
+ ret = do_subchannel_work(sch, orb);
out:
return ret;
uint16_t stctl;
IRB irb;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
return 3;
}
PMCW *p = &sch->curr_status.pmcw;
int ret;
- if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ if (~(p->flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
ret = -ENODEV;
goto out;
}
uint8_t real_cssid;
real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
- if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
+ if (ssid > MAX_SSID ||
!channel_subsys.css[real_cssid] ||
!channel_subsys.css[real_cssid]->sch_set[ssid]) {
return true;
(MAX_SCHID + 1) / sizeof(unsigned long));
}
-static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
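+/* Return an unused chpid in @cssid's image, or MAX_CHPID + 1 if none. */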
+unsigned int css_find_free_chpid(uint8_t cssid)
+{
+ CssImage *css = channel_subsys.css[cssid];
+ unsigned int chpid;
+
+ if (!css) {
+ return MAX_CHPID + 1;
+ }
+
+ for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
+ /* skip reserved chpid */
+ if (chpid == VIRTIO_CCW_CHPID) {
+ continue;
+ }
+ if (!css->chpids[chpid].in_use) {
+ return chpid;
+ }
+ }
+ return MAX_CHPID + 1;
+}
+
+static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
+ bool is_virt)
{
CssImage *css;
trace_css_chpid_add(cssid, chpid, type);
- if (cssid > MAX_CSSID) {
- return -EINVAL;
- }
css = channel_subsys.css[cssid];
if (!css) {
return -EINVAL;
}
css->chpids[chpid].in_use = 1;
css->chpids[chpid].type = type;
- css->chpids[chpid].is_virtual = 1;
+ css->chpids[chpid].is_virtual = is_virt;
css_generate_chp_crws(cssid, chpid);
p->pam = 0x80;
p->chpid[0] = chpid;
if (!css->chpids[chpid].in_use) {
- css_add_virtual_chpid(sch->cssid, chpid, type);
+ css_add_chpid(sch->cssid, chpid, type, true);
}
memset(s, 0, sizeof(SCSW));
int subch_device_load(SubchDev *s, QEMUFile *f)
{
+ SubchDev *old_s;
+ uint16_t old_schid = s->schid;
int i;
s->cssid = qemu_get_byte(f);
s->ssid = qemu_get_byte(f);
s->schid = qemu_get_be16(f);
s->devno = qemu_get_be16(f);
+ /* Re-assign subch. */
+ if (old_schid != s->schid) {
+ old_s = channel_subsys.css[s->cssid]->sch_set[s->ssid]->sch[old_schid];
+ /*
+ * (old_s != s) means that some other device has its correct
+ * subchannel already assigned (in load).
+ */
+ if (old_s == s) {
+ css_subch_assign(s->cssid, s->ssid, old_schid, s->devno, NULL);
+ }
+ /* It's OK to re-assign without a prior de-assign. */
+ css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
+ }
s->thinint_active = qemu_get_byte(f);
/* SCHIB */
/* PMCW */
.set = set_css_devid,
};
-SubchDev *css_create_virtual_sch(CssDevId bus_id, Error **errp)
+PropertyInfo css_devid_ro_propinfo = {
+ .name = "str",
+ .description = "Read-only identifier of an I/O device in the channel "
+ "subsystem, example: fe.1.23ab",
+ .get = get_css_devid,
+};
+
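+/*
+ * Create a subchannel for the given bus id. If the user did not specify
+ * an id, allocate one: in the default css for virtual devices (or when
+ * squashing to the default css), otherwise in the first non-virtual css
+ * with a free slot.
+ */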
+SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
+ Error **errp)
{
uint16_t schid = 0;
SubchDev *sch;
if (bus_id.valid) {
- /* Enforce use of virtual cssid. */
- if (bus_id.cssid != VIRTUAL_CSSID) {
- error_setg(errp, "cssid %hhx not valid for virtual devices",
- bus_id.cssid);
+ if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
+ error_setg(errp, "cssid %hhx not valid for %s devices",
+ bus_id.cssid,
+ (is_virtual ? "virtual" : "non-virtual"));
return NULL;
}
+ }
+
+ if (bus_id.valid) {
+ if (squash_mcss) {
+ bus_id.cssid = channel_subsys.default_cssid;
+ } else if (!channel_subsys.css[bus_id.cssid]) {
+ css_create_css_image(bus_id.cssid, false);
+ }
+
if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
bus_id.devid, &schid, errp)) {
return NULL;
}
- } else {
- bus_id.cssid = VIRTUAL_CSSID;
+ } else if (squash_mcss || is_virtual) {
+ bus_id.cssid = channel_subsys.default_cssid;
+
if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
&bus_id.devid, &schid, errp)) {
return NULL;
}
+ } else {
+ for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
+ if (bus_id.cssid == VIRTUAL_CSSID) {
+ continue;
+ }
+
+ if (!channel_subsys.css[bus_id.cssid]) {
+ css_create_css_image(bus_id.cssid, false);
+ }
+
+            if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
+                                              &bus_id.devid, &schid,
+                                              NULL)) {
+                break;
+            }
+        }
+        /* The loop terminated without finding a free subchannel. */
+        if (bus_id.cssid == MAX_CSSID) {
+            error_setg(errp, "Virtual channel subsystem is full!");
+            return NULL;
+        }
}
sch = g_malloc0(sizeof(*sch));
css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
return sch;
}
+
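+/*
+ * Fill in the PMCW chpid array from the real device's sysfs attribute
+ * /sys/bus/css/devices/<cssid>.<ssid>.<devid>/chpids.
+ */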
+static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
+{
+ char *fid_path;
+ FILE *fd;
+ uint32_t chpid[8];
+ int i;
+ PMCW *p = &sch->curr_status.pmcw;
+
+ fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
+ dev_id->cssid, dev_id->ssid, dev_id->devid);
+ fd = fopen(fid_path, "r");
+ if (fd == NULL) {
+ error_report("%s: open %s failed", __func__, fid_path);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ if (fscanf(fd, "%x %x %x %x %x %x %x %x",
+ &chpid[0], &chpid[1], &chpid[2], &chpid[3],
+ &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
+ fclose(fd);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
+ p->chpid[i] = chpid[i];
+ }
+
+ fclose(fd);
+ g_free(fid_path);
+
+ return 0;
+}
+
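+/*
+ * Fill in the PMCW path masks (pim/pam/pom) from the real device's
+ * sysfs pimpampom attribute.
+ */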
+static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
+{
+ char *fid_path;
+ FILE *fd;
+ uint32_t pim, pam, pom;
+ PMCW *p = &sch->curr_status.pmcw;
+
+ fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
+ dev_id->cssid, dev_id->ssid, dev_id->devid);
+ fd = fopen(fid_path, "r");
+ if (fd == NULL) {
+ error_report("%s: open %s failed", __func__, fid_path);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
+ fclose(fd);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ p->pim = pim;
+ p->pam = pam;
+ p->pom = pom;
+ fclose(fd);
+ g_free(fid_path);
+
+ return 0;
+}
+
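+/* Retrieve the channel-path type for @chpid from sysfs. */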
+static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
+ CssDevId *dev_id)
+{
+ char *fid_path;
+ FILE *fd;
+
+ fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
+ dev_id->cssid, chpid);
+ fd = fopen(fid_path, "r");
+ if (fd == NULL) {
+ error_report("%s: open %s failed", __func__, fid_path);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ if (fscanf(fd, "%x", type) != 1) {
+ fclose(fd);
+ g_free(fid_path);
+ return -EINVAL;
+ }
+
+ fclose(fd);
+ g_free(fid_path);
+
+ return 0;
+}
+
+/*
+ * We currently retrieve the real device information from sysfs to build the
+ * guest subchannel information block without considering the migration feature.
+ * We need to revisit this problem when we want to add migration support.
+ */
+int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
+{
+ CssImage *css = channel_subsys.css[sch->cssid];
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ uint32_t type;
+ int i, ret;
+
+ assert(css != NULL);
+ memset(p, 0, sizeof(PMCW));
+ p->flags |= PMCW_FLAGS_MASK_DNV;
+ /* We are dealing with I/O subchannels only. */
+ p->devno = sch->devno;
+
+ /* Grab path mask from sysfs. */
+ ret = css_sch_get_path_masks(sch, dev_id);
+ if (ret) {
+ return ret;
+ }
+
+ /* Grab chpids from sysfs. */
+ ret = css_sch_get_chpids(sch, dev_id);
+ if (ret) {
+ return ret;
+ }
+
+ /* Build chpid type. */
+ for (i = 0; i < ARRAY_SIZE(p->chpid); i++) {
+ if (p->chpid[i] && !css->chpids[p->chpid[i]].in_use) {
+ ret = css_sch_get_chpid_type(p->chpid[i], &type, dev_id);
+ if (ret) {
+ return ret;
+ }
+ css_add_chpid(sch->cssid, p->chpid[i], type, false);
+ }
+ }
+
+ memset(s, 0, sizeof(SCSW));
+ sch->curr_status.mba = 0;
+ for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
+ sch->curr_status.mda[i] = 0;
+ }
+
+ return 0;
+}