#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
+#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "qemu/event_notifier.h"
#include "trace.h"
+#include "hw/boards.h"
+
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
unsigned irq_set_ioctl;
+ unsigned int sigmask_len;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
+bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
-static void kvm_reset_vcpu(void *opaque)
-{
- CPUState *cpu = opaque;
-
- kvm_arch_reset_vcpu(cpu);
-}
-
int kvm_init_vcpu(CPUState *cpu)
{
KVMState *s = kvm_state;
}
ret = kvm_arch_init_vcpu(cpu);
- if (ret == 0) {
- qemu_register_reset(kvm_reset_vcpu, cpu);
- kvm_arch_reset_vcpu(cpu);
- }
err:
return ret;
}
return ret;
}
+int kvm_vm_check_extension(KVMState *s, unsigned int extension)
+{
+ int ret;
+
+ ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
+ if (ret < 0) {
+ /* VM wide version not implemented, use global one instead */
+ ret = kvm_check_extension(s, extension);
+ }
+
+ return ret;
+}
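+
+/*
+ * Illustrative sketch: callers that want the per-VM answer where the
+ * kernel provides one, with a transparent fallback on older kernels,
+ * just use this wrapper:
+ *
+ * if (kvm_vm_check_extension(s, KVM_CAP_IRQ_ROUTING) > 0) {
+ * ... the capability is available for this VM ...
+ * }
+ */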
+
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
bool assign, uint32_t size, bool datamatch)
{
{
int gsi_count, i;
- gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
+ gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
if (gsi_count > 0) {
unsigned int gsi_bits, i;
return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
+int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
+{
+ struct kvm_irq_routing_entry kroute;
+ int virq;
+
+ if (!kvm_gsi_routing_enabled()) {
+ return -ENOSYS;
+ }
+
+ virq = kvm_irqchip_get_virq(s);
+ if (virq < 0) {
+ return virq;
+ }
+
+ kroute.gsi = virq;
+ kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
+ kroute.flags = 0;
+ kroute.u.adapter.summary_addr = adapter->summary_addr;
+ kroute.u.adapter.ind_addr = adapter->ind_addr;
+ kroute.u.adapter.summary_offset = adapter->summary_offset;
+ kroute.u.adapter.ind_offset = adapter->ind_offset;
+ kroute.u.adapter.adapter_id = adapter->adapter_id;
+
+ kvm_add_routing_entry(s, &kroute);
+ kvm_irqchip_commit_routes(s);
+
+ return virq;
+}
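+
+/*
+ * Usage sketch (illustrative): an s390 adapter device fills in an
+ * AdapterInfo, obtains a virq from this helper, and can then bind an
+ * eventfd to it, e.g. with kvm_irqchip_add_irqfd_notifier(), so that
+ * adapter interrupts are injected without a userspace exit.
+ */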
+
#else /* !KVM_CAP_IRQ_ROUTING */
void kvm_init_irq_routing(KVMState *s)
return -ENOSYS;
}
+int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
+{
+ return -ENOSYS;
+}
+
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
abort();
int ret;
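+ /*
+ * s390 does not advertise KVM_CAP_IRQCHIP; its in-kernel irqchip is
+ * enabled by setting KVM_CAP_S390_IRQCHIP on the VM instead, so treat
+ * success of either probe as "irqchip available".
+ */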
if (!qemu_opt_get_bool(qemu_get_machine_opts(), "kernel_irqchip", true) ||
- !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
+ (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
+ (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
return 0;
}
return (ret) ? ret : kvm_recommended_vcpus(s);
}
-int kvm_init(void)
+int kvm_init(MachineClass *mc)
{
static const char upgrade_note[] =
"Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
KVMState *s;
const KVMCapabilityInfo *missing_cap;
int ret;
- int i;
+ int i, type = 0;
+ const char *kvm_type;
s = g_malloc0(sizeof(KVMState));
assert(TARGET_PAGE_SIZE <= getpagesize());
page_size_init();
+ s->sigmask_len = 8;
+
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
if (ret < KVM_API_VERSION) {
- if (ret > 0) {
+ if (ret >= 0) {
ret = -EINVAL;
}
fprintf(stderr, "kvm version too old\n");
nc->name, nc->num, soft_vcpus_limit);
if (nc->num > hard_vcpus_limit) {
- ret = -EINVAL;
fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
"the maximum cpus supported by KVM (%d)\n",
nc->name, nc->num, hard_vcpus_limit);
- goto err;
+ exit(1);
}
}
nc++;
}
+ kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
+ if (mc->kvm_type) {
+ type = mc->kvm_type(kvm_type);
+ } else if (kvm_type) {
+ ret = -EINVAL;
+ fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
+ goto err;
+ }
+
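+ /*
+ * Sketch with hypothetical names: a machine needing a non-default VM
+ * type can supply a kvm_type callback that translates the "kvm-type"
+ * option into the value handed to KVM_CREATE_VM below, e.g.:
+ *
+ * static int foo_kvm_type(const char *kvm_type)
+ * {
+ * return g_strcmp0(kvm_type, "bar") == 0 ? 1 : 0;
+ * }
+ */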
do {
- ret = kvm_ioctl(s, KVM_CREATE_VM, 0);
+ ret = kvm_ioctl(s, KVM_CREATE_VM, type);
} while (ret == -EINTR);
if (ret < 0) {
(kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif
+ kvm_eventfds_allowed =
+ (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
+
ret = kvm_arch_init(s);
if (ret < 0) {
goto err;
return 0;
err:
+ assert(ret < 0);
if (s->vmfd >= 0) {
close(s->vmfd);
}
return ret;
}
+void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
+{
+ s->sigmask_len = sigmask_len;
+}
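+
+/*
+ * The default of 8 set in kvm_init() matches the 64-bit sigset the
+ * kernel expects on most targets; a target whose KVM_SET_SIGNAL_MASK
+ * layout differs can override it from its arch init code, e.g.
+ * (illustrative value):
+ *
+ * kvm_set_sigmask_len(s, 16);
+ */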
+
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
uint32_t count)
{
}
}
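+/*
+ * kvm_arch_put_registers() must run on the vcpu's own thread, so the
+ * public synchronize helpers below defer to it via run_on_cpu(), which
+ * makes them safe to call from any thread.
+ */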
-void kvm_cpu_synchronize_post_reset(CPUState *cpu)
+static void do_kvm_cpu_synchronize_post_reset(void *arg)
{
+ CPUState *cpu = arg;
+
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
cpu->kvm_vcpu_dirty = false;
}
-void kvm_cpu_synchronize_post_init(CPUState *cpu)
+void kvm_cpu_synchronize_post_reset(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
+}
+
+static void do_kvm_cpu_synchronize_post_init(void *arg)
{
+ CPUState *cpu = arg;
+
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
cpu->kvm_vcpu_dirty = false;
}
+void kvm_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
+}
+
int kvm_cpu_exec(CPUState *cpu)
{
struct kvm_run *run = cpu->kvm_run;
case KVM_EXIT_INTERNAL_ERROR:
ret = kvm_handle_internal_error(cpu, run);
break;
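+ /*
+ * System events raised by the guest (e.g. PSCI SYSTEM_OFF and
+ * SYSTEM_RESET on ARM) are translated into the corresponding
+ * QEMU-wide shutdown/reset requests; unknown event types fall
+ * through to the architecture handler.
+ */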
+ case KVM_EXIT_SYSTEM_EVENT:
+ switch (run->system_event.type) {
+ case KVM_SYSTEM_EVENT_SHUTDOWN:
+ qemu_system_shutdown_request();
+ ret = EXCP_INTERRUPT;
+ break;
+ case KVM_SYSTEM_EVENT_RESET:
+ qemu_system_reset_request();
+ ret = EXCP_INTERRUPT;
+ break;
+ default:
+ DPRINTF("kvm_arch_handle_exit\n");
+ ret = kvm_arch_handle_exit(cpu, run);
+ break;
+ }
+ break;
default:
DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(cpu, run);
{
struct kvm_sw_breakpoint *bp, *next;
KVMState *s = cpu->kvm_state;
+ CPUState *tmpcpu;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
- CPU_FOREACH(cpu) {
- if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
+ CPU_FOREACH(tmpcpu) {
+ if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
break;
}
}
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
+ KVMState *s = kvm_state;
struct kvm_signal_mask *sigmask;
int r;
sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
- sigmask->len = 8;
+ sigmask->len = s->sigmask_len;
memcpy(sigmask->sigset, sigset, sizeof(*sigset));
r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
g_free(sigmask);
return test ? 0 : create_dev.fd;
}
+
+int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
+{
+ struct kvm_one_reg reg;
+ int r;
+
+ reg.id = id;
+ reg.addr = (uintptr_t) source;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (r) {
+ trace_kvm_failed_reg_set(id, strerror(-r));
+ }
+ return r;
+}
+
+int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
+{
+ struct kvm_one_reg reg;
+ int r;
+
+ reg.id = id;
+ reg.addr = (uintptr_t) target;
+ r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (r) {
+ trace_kvm_failed_reg_get(id, strerror(-r));
+ }
+ return r;
+}
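+
+/*
+ * Illustrative sketch (REG_FOO stands in for a real KVM_REG_* id):
+ *
+ * uint64_t val;
+ *
+ * if (kvm_get_one_reg(cs, REG_FOO, &val) == 0) {
+ * val |= 1;
+ * kvm_set_one_reg(cs, REG_FOO, &val);
+ * }
+ */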