DEFINE_PROP_END_OF_LIST(),
};
+static void ccw_device_reset(DeviceState *d)
+{
+ CcwDevice *ccw_dev = CCW_DEVICE(d);
+
+ css_reset_sch(ccw_dev->sch);
+}
+
static void ccw_device_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCWDeviceClass *k = CCW_DEVICE_CLASS(klass);
k->realize = ccw_device_realize;
k->refill_ids = ccw_device_refill_ids;
dc->props = ccw_device_properties;
+ dc->reset = ccw_device_reset;
}
const VMStateDescription vmstate_ccw_dev = {
void css_conditional_io_interrupt(SubchDev *sch)
{
+ /*
+ * If the subchannel is not enabled, it is not made status pending
+ * (see PoP p. 16-17, "Status Control").
+ */
+ if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA)) {
+ return;
+ }
+
/*
* If the subchannel is not currently status pending, make it pending
* with alert status.
#include "qemu/config-file.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
+#include "exec/exec-all.h"
#define KERN_IMAGE_START 0x010000UL
#define KERN_PARM_AREA 0x010480UL
return &ipl->iplb;
}
-void s390_reipl_request(void)
+void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type)
{
S390IPLState *ipl = get_ipl_device();
- ipl->reipl_requested = true;
- if (ipl->iplb_valid &&
+ if (reset_type == S390_RESET_EXTERNAL || reset_type == S390_RESET_REIPL) {
+ /* use CPU 0 for full resets */
+ ipl->reset_cpu_index = 0;
+ } else {
+ ipl->reset_cpu_index = cs->cpu_index;
+ }
+ ipl->reset_type = reset_type;
+
+ if (reset_type == S390_RESET_REIPL &&
+ ipl->iplb_valid &&
!ipl->netboot &&
ipl->iplb.pbt == S390_IPL_TYPE_CCW &&
is_virtio_scsi_device(&ipl->iplb)) {
}
}
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ /* as this is triggered by a CPU, make sure to exit the loop */
+ if (tcg_enabled()) {
+ cpu_loop_exit(cs);
+ }
+}
+
+void s390_ipl_get_reset_request(CPUState **cs, enum s390_reset *reset_type)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ *cs = qemu_get_cpu(ipl->reset_cpu_index);
+ if (!*cs) {
+ /* use any CPU */
+ *cs = first_cpu;
+ }
+ *reset_type = ipl->reset_type;
+}
+
+void s390_ipl_clear_reset_request(void)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ ipl->reset_type = S390_RESET_EXTERNAL;
+ /* use CPU 0 for full resets */
+ ipl->reset_cpu_index = 0;
}
static void s390_ipl_prepare_qipl(S390CPU *cpu)
static void s390_ipl_reset(DeviceState *dev)
{
S390IPLState *ipl = S390_IPL(dev);
- if (!ipl->reipl_requested) {
+ if (ipl->reset_type != S390_RESET_REIPL) {
ipl->iplb_valid = false;
memset(&ipl->iplb, 0, sizeof(IplParameterBlock));
}
- ipl->reipl_requested = false;
}
static void s390_ipl_class_init(ObjectClass *klass, void *data)
void s390_ipl_update_diag308(IplParameterBlock *iplb);
void s390_ipl_prepare_cpu(S390CPU *cpu);
IplParameterBlock *s390_ipl_get_iplb(void);
-void s390_reipl_request(void);
+
+enum s390_reset {
+ /* default is a reset not triggered by a CPU, e.g. issued by QMP */
+ S390_RESET_EXTERNAL = 0,
+ S390_RESET_REIPL,
+ S390_RESET_MODIFIED_CLEAR,
+ S390_RESET_LOAD_NORMAL,
+};
+void s390_ipl_reset_request(CPUState *cs, enum s390_reset reset_type);
+void s390_ipl_get_reset_request(CPUState **cs, enum s390_reset *reset_type);
+void s390_ipl_clear_reset_request(void);
#define QIPL_ADDRESS 0xcc
bool enforce_bios;
IplParameterBlock iplb;
bool iplb_valid;
- bool reipl_requested;
bool netboot;
QemuIplParameters qipl;
+ /* reset-related properties don't have to be migrated or reset */
+ enum s390_reset reset_type;
+ int reset_cpu_index;
/*< public >*/
char *kernel;
"diag288",
};
-void subsystem_reset(void)
+static void subsystem_reset(void)
{
DeviceState *dev;
int i;
}
}
+static inline void s390_do_cpu_ipl(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ s390_ipl_prepare_cpu(cpu);
+ s390_cpu_set_state(S390_CPU_STATE_OPERATING, cpu);
+}
+
static void s390_machine_reset(void)
{
- S390CPU *ipl_cpu = S390_CPU(qemu_get_cpu(0));
+ enum s390_reset reset_type;
+ CPUState *cs, *t;
+ /* get the reset parameters; they are cleared again once the reset is done */
+ s390_ipl_get_reset_request(&cs, &reset_type);
+
+ /* all CPUs are paused and synchronized at this point */
s390_cmma_reset();
- qemu_devices_reset();
- s390_crypto_reset();
- /* all cpus are stopped - configure and start the ipl cpu only */
- s390_ipl_prepare_cpu(ipl_cpu);
- s390_cpu_set_state(S390_CPU_STATE_OPERATING, ipl_cpu);
+ switch (reset_type) {
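+ /* a full reset: triggered externally (e.g. via QMP) or by a guest re-IPL request */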
+ case S390_RESET_EXTERNAL:
+ case S390_RESET_REIPL:
+ qemu_devices_reset();
+ s390_crypto_reset();
+
+ /* configure and start the ipl CPU only */
+ run_on_cpu(cs, s390_do_cpu_ipl, RUN_ON_CPU_NULL);
+ break;
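+ /*
+ * diag 308 subcode 0: fully reset all CPUs and the subsystem,
+ * then perform load normal on the requesting CPU
+ */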
+ case S390_RESET_MODIFIED_CLEAR:
+ CPU_FOREACH(t) {
+ run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+ }
+ subsystem_reset();
+ s390_crypto_reset();
+ run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL);
+ break;
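+ /*
+ * diag 308 subcode 1: normal CPU reset on all CPUs, then initial
+ * CPU reset and load normal on the requesting CPU
+ */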
+ case S390_RESET_LOAD_NORMAL:
+ CPU_FOREACH(t) {
+ run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
+ }
+ subsystem_reset();
+ run_on_cpu(cs, s390_do_cpu_initial_reset, RUN_ON_CPU_NULL);
+ run_on_cpu(cs, s390_do_cpu_load_normal, RUN_ON_CPU_NULL);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ s390_ipl_clear_reset_request();
}
static void s390_machine_device_plug(HotplugHandler *hotplug_dev,
static void virtio_ccw_reset(DeviceState *d)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
- CcwDevice *ccw_dev = CCW_DEVICE(d);
+ VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
virtio_ccw_reset_virtio(dev, vdev);
- css_reset_sch(ccw_dev->sch);
+ if (vdc->parent_reset) {
+ vdc->parent_reset(d);
+ }
}
static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
k->realize = virtio_ccw_net_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_net_properties;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
k->realize = virtio_ccw_blk_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_blk_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
k->realize = virtio_ccw_serial_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_serial_properties;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
k->realize = virtio_ccw_balloon_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_balloon_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
k->realize = virtio_ccw_scsi_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_scsi_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
k->realize = vhost_ccw_scsi_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = vhost_ccw_scsi_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
k->realize = virtio_ccw_rng_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_rng_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
k->realize = virtio_ccw_crypto_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_crypto_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}
k->realize = virtio_ccw_gpu_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_gpu_properties;
dc->hotpluggable = false;
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
k->realize = virtio_ccw_input_realize;
k->unrealize = virtio_ccw_unrealize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_input_properties;
set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
static void virtio_ccw_busdev_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
CCWDeviceClass *k = CCW_DEVICE_CLASS(dc);
+ VirtIOCCWDeviceClass *vdc = VIRTIO_CCW_DEVICE_CLASS(klass);
k->unplug = virtio_ccw_busdev_unplug;
dc->realize = virtio_ccw_busdev_realize;
dc->unrealize = virtio_ccw_busdev_unrealize;
dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
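+ /* save the CcwDevice reset handler in parent_reset and install virtio_ccw_reset */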
+ device_class_set_parent_reset(dc, virtio_ccw_reset, &vdc->parent_reset);
}
static const TypeInfo virtio_ccw_device_info = {
k->unrealize = virtio_ccw_unrealize;
k->realize = virtio_ccw_9p_realize;
- dc->reset = virtio_ccw_reset;
dc->props = virtio_ccw_9p_properties;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
k->unrealize = virtio_ccw_unrealize;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->props = vhost_vsock_ccw_properties;
- dc->reset = virtio_ccw_reset;
}
static void vhost_vsock_ccw_instance_init(Object *obj)
CCWDeviceClass parent_class;
void (*realize)(VirtioCcwDevice *dev, Error **errp);
void (*unrealize)(VirtioCcwDevice *dev, Error **errp);
+ void (*parent_reset)(DeviceState *dev);
} VirtIOCCWDeviceClass;
/* Performance improves when virtqueue kick processing is decoupled from the
*/
bool css_migration_enabled(void);
-void subsystem_reset(void);
-
#endif
__u32 reserved3 : 12;
__u32 int_type : 3;
__u32 reserved4 : 12;
-} __attribute__ ((packed));
+} __attribute__ ((packed, aligned(4)));
/* channel command word (type 1) */
struct ccw1 {
return mcic;
}
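+/*
+ * run_on_cpu() handlers wrapping the different CPU reset flavours;
+ * used by the machine reset code.
+ */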
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ cpu_reset(cs);
+}
+
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->cpu_reset(cs);
+}
+
+static inline void s390_do_cpu_initial_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->initial_cpu_reset(cs);
+}
+
+static inline void s390_do_cpu_load_normal(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->load_normal(cs);
+}
+
/* cpu.c */
int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low);
#include "hw/s390x/ipl.h"
#include "hw/s390x/s390-virtio-ccw.h"
-static int modified_clear_reset(S390CPU *cpu)
-{
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
- CPUState *t;
-
- pause_all_vcpus();
- cpu_synchronize_all_states();
- CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
- }
- s390_cmma_reset();
- subsystem_reset();
- s390_crypto_reset();
- scc->load_normal(CPU(cpu));
- cpu_synchronize_all_post_reset();
- resume_all_vcpus();
- return 0;
-}
-
-static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
-{
- S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
-
- scc->cpu_reset(cs);
-}
-
-static int load_normal_reset(S390CPU *cpu)
-{
- S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
- CPUState *t;
-
- pause_all_vcpus();
- cpu_synchronize_all_states();
- CPU_FOREACH(t) {
- run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
- }
- s390_cmma_reset();
- subsystem_reset();
- scc->initial_cpu_reset(CPU(cpu));
- scc->load_normal(CPU(cpu));
- cpu_synchronize_all_post_reset();
- resume_all_vcpus();
- return 0;
-}
-
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
uint64_t func = env->regs[r1];
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t addr = env->regs[r1];
uint64_t subcode = env->regs[r3];
IplParameterBlock *iplb;
switch (subcode) {
case 0:
- modified_clear_reset(s390_env_get_cpu(env));
- if (tcg_enabled()) {
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
- }
+ s390_ipl_reset_request(cs, S390_RESET_MODIFIED_CLEAR);
break;
case 1:
- load_normal_reset(s390_env_get_cpu(env));
- if (tcg_enabled()) {
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
- }
+ s390_ipl_reset_request(cs, S390_RESET_LOAD_NORMAL);
break;
case 3:
- s390_reipl_request();
- if (tcg_enabled()) {
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
- }
+ s390_ipl_reset_request(cs, S390_RESET_REIPL);
break;
case 5:
if ((r1 & 1) || (addr & 0x0fffULL)) {
/* Base/displacement are at the same locations. */
#define decode_basedisp_rs decode_basedisp_s
-static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
-{
- cpu_reset(cs);
-}
-
-
/* arch_dump.c */
int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
ret = handle_intercept(cpu);
break;
case KVM_EXIT_S390_RESET:
- s390_reipl_request();
+ s390_ipl_reset_request(cs, S390_RESET_REIPL);
break;
case KVM_EXIT_S390_TSCH:
ret = handle_tsch(cpu);
const MachineState *ms = MACHINE(qdev_get_machine());
uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0;
S390CPU *cpu = s390_env_get_cpu(env);
- SysIB sysib = { 0 };
+ SysIB sysib = { };
int i, cc = 0;
if ((r0 & STSI_R0_FC_MASK) > STSI_R0_FC_LEVEL_3) {