X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/9fbee91a131a05e443d7108d7fbdf3ca91020290..34bf774485a1259fc50515e8d761bd543bb316c8:/kvm-all.c

diff --git a/kvm-all.c b/kvm-all.c
index fd8157ad5e..b1cf703f9e 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -27,6 +27,7 @@
 #include "sysemu/sysemu.h"
 #include "hw/hw.h"
 #include "hw/pci/msi.h"
+#include "hw/s390x/adapter.h"
 #include "exec/gdbstub.h"
 #include "sysemu/kvm.h"
 #include "qemu/bswap.h"
@@ -36,6 +37,8 @@
 #include "qemu/event_notifier.h"
 #include "trace.h"
 
+#include "hw/boards.h"
+
 /* This check must be after config-host.h is included */
 #ifdef CONFIG_EVENTFD
 #include <sys/eventfd.h>
@@ -96,6 +99,7 @@ struct KVMState
      * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
      * unsigned, and treating them as signed here can break things */
     unsigned irq_set_ioctl;
+    unsigned int sigmask_len;
 #ifdef KVM_CAP_IRQ_ROUTING
     struct kvm_irq_routing *irq_routes;
     int nr_allocated_irq_routes;
@@ -110,6 +114,7 @@ KVMState *kvm_state;
 bool kvm_kernel_irqchip;
 bool kvm_async_interrupts_allowed;
 bool kvm_halt_in_kernel_allowed;
+bool kvm_eventfds_allowed;
 bool kvm_irqfds_allowed;
 bool kvm_msi_via_irqfd_allowed;
 bool kvm_gsi_routing_allowed;
@@ -221,13 +226,6 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
     return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 }
 
-static void kvm_reset_vcpu(void *opaque)
-{
-    CPUState *cpu = opaque;
-
-    kvm_arch_reset_vcpu(cpu);
-}
-
 int kvm_init_vcpu(CPUState *cpu)
 {
     KVMState *s = kvm_state;
@@ -267,10 +265,6 @@ int kvm_init_vcpu(CPUState *cpu)
     }
 
     ret = kvm_arch_init_vcpu(cpu);
-    if (ret == 0) {
-        qemu_register_reset(kvm_reset_vcpu, cpu);
-        kvm_arch_reset_vcpu(cpu);
-    }
 err:
     return ret;
 }
@@ -499,6 +493,19 @@ int kvm_check_extension(KVMState *s, unsigned int extension)
     return ret;
 }
 
+int kvm_vm_check_extension(KVMState *s, unsigned int extension)
+{
+    int ret;
+
+    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
+    if (ret < 0) {
+        /* VM wide version not implemented, use global one instead */
+        ret = kvm_check_extension(s, extension);
+    }
+
+    return ret;
+}
+
 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                   bool assign, uint32_t size, bool datamatch)
 {
@@ -946,7 +953,7 @@ void kvm_init_irq_routing(KVMState *s)
 {
     int gsi_count, i;
 
-    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
+    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
     if (gsi_count > 0) {
         unsigned int gsi_bits, i;
 
@@ -1245,6 +1252,35 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
     return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
 }
 
+int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
+{
+    struct kvm_irq_routing_entry kroute;
+    int virq;
+
+    if (!kvm_gsi_routing_enabled()) {
+        return -ENOSYS;
+    }
+
+    virq = kvm_irqchip_get_virq(s);
+    if (virq < 0) {
+        return virq;
+    }
+
+    kroute.gsi = virq;
+    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
+    kroute.flags = 0;
+    kroute.u.adapter.summary_addr = adapter->summary_addr;
+    kroute.u.adapter.ind_addr = adapter->ind_addr;
+    kroute.u.adapter.summary_offset = adapter->summary_offset;
+    kroute.u.adapter.ind_offset = adapter->ind_offset;
+    kroute.u.adapter.adapter_id = adapter->adapter_id;
+
+    kvm_add_routing_entry(s, &kroute);
+    kvm_irqchip_commit_routes(s);
+
+    return virq;
+}
+
 #else /* !KVM_CAP_IRQ_ROUTING */
 
 void kvm_init_irq_routing(KVMState *s)
@@ -1265,6 +1301,11 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
     return -ENOSYS;
 }
 
+int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
+{
+    return -ENOSYS;
+}
+
 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
 {
     abort();
@@ -1294,7 +1335,8 @@ static int kvm_irqchip_create(KVMState *s)
     int ret;
 
     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "kernel_irqchip", true) ||
-        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
+        (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
+         (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
         return 0;
     }
 
@@ -1339,7 +1381,7 @@ static int kvm_max_vcpus(KVMState *s)
     return (ret) ? ret : kvm_recommended_vcpus(s);
 }
 
-int kvm_init(void)
+int kvm_init(MachineClass *mc)
 {
     static const char upgrade_note[] =
         "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
@@ -1356,7 +1398,8 @@ int kvm_init(void)
     KVMState *s;
     const KVMCapabilityInfo *missing_cap;
     int ret;
-    int i;
+    int i, type = 0;
+    const char *kvm_type;
 
     s = g_malloc0(sizeof(KVMState));
 
@@ -1369,6 +1412,8 @@ int kvm_init(void)
     assert(TARGET_PAGE_SIZE <= getpagesize());
     page_size_init();
 
+    s->sigmask_len = 8;
+
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
@@ -1382,7 +1427,7 @@ int kvm_init(void)
 
     ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
     if (ret < KVM_API_VERSION) {
-        if (ret > 0) {
+        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
@@ -1420,18 +1465,26 @@ int kvm_init(void)
                     nc->name, nc->num, soft_vcpus_limit);
 
             if (nc->num > hard_vcpus_limit) {
-                ret = -EINVAL;
                 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                         "the maximum cpus supported by KVM (%d)\n",
                         nc->name, nc->num, hard_vcpus_limit);
-                goto err;
+                exit(1);
             }
         }
         nc++;
     }
 
+    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
+    if (mc->kvm_type) {
+        type = mc->kvm_type(kvm_type);
+    } else if (kvm_type) {
+        ret = -EINVAL;
+        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
+        goto err;
+    }
+
     do {
-        ret = kvm_ioctl(s, KVM_CREATE_VM, 0);
+        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
     } while (ret == -EINTR);
 
     if (ret < 0) {
@@ -1505,6 +1558,9 @@ int kvm_init(void)
         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
 #endif
 
+    kvm_eventfds_allowed =
+        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
+
     ret = kvm_arch_init(s);
     if (ret < 0) {
         goto err;
@@ -1526,6 +1582,7 @@ int kvm_init(void)
     return 0;
 
 err:
+    assert(ret < 0);
     if (s->vmfd >= 0) {
         close(s->vmfd);
     }
@@ -1538,6 +1595,11 @@ err:
     return ret;
 }
 
+void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
+{
+    s->sigmask_len = sigmask_len;
+}
+
 static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                           uint32_t count)
 {
@@ -1620,18 +1682,32 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
     }
 }
 
-void kvm_cpu_synchronize_post_reset(CPUState *cpu)
+static void do_kvm_cpu_synchronize_post_reset(void *arg)
 {
+    CPUState *cpu = arg;
+
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
     cpu->kvm_vcpu_dirty = false;
 }
 
-void kvm_cpu_synchronize_post_init(CPUState *cpu)
+void kvm_cpu_synchronize_post_reset(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
+}
+
+static void do_kvm_cpu_synchronize_post_init(void *arg)
 {
+    CPUState *cpu = arg;
+
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
     cpu->kvm_vcpu_dirty = false;
 }
 
+void kvm_cpu_synchronize_post_init(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
+}
+
 int kvm_cpu_exec(CPUState *cpu)
 {
     struct kvm_run *run = cpu->kvm_run;
@@ -1714,6 +1790,22 @@ int kvm_cpu_exec(CPUState *cpu)
         case KVM_EXIT_INTERNAL_ERROR:
             ret = kvm_handle_internal_error(cpu, run);
             break;
+        case KVM_EXIT_SYSTEM_EVENT:
+            switch (run->system_event.type) {
+            case KVM_SYSTEM_EVENT_SHUTDOWN:
+                qemu_system_shutdown_request();
+                ret = EXCP_INTERRUPT;
+                break;
+            case KVM_SYSTEM_EVENT_RESET:
+                qemu_system_reset_request();
+                ret = EXCP_INTERRUPT;
+                break;
+            default:
+                DPRINTF("kvm_arch_handle_exit\n");
+                ret = kvm_arch_handle_exit(cpu, run);
+                break;
+            }
+            break;
         default:
             DPRINTF("kvm_arch_handle_exit\n");
             ret = kvm_arch_handle_exit(cpu, run);
@@ -2012,12 +2104,13 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
 {
     struct kvm_sw_breakpoint *bp, *next;
     KVMState *s = cpu->kvm_state;
+    CPUState *tmpcpu;
 
     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
         if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
-            CPU_FOREACH(cpu) {
-                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
+            CPU_FOREACH(tmpcpu) {
+                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                     break;
                 }
             }
@@ -2058,6 +2151,7 @@ void kvm_remove_all_breakpoints(CPUState *cpu)
 
 int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
 {
+    KVMState *s = kvm_state;
     struct kvm_signal_mask *sigmask;
     int r;
 
@@ -2067,7 +2161,7 @@ int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
 
     sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
 
-    sigmask->len = 8;
+    sigmask->len = s->sigmask_len;
     memcpy(sigmask->sigset, sigset, sizeof(*sigset));
     r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
     g_free(sigmask);
@@ -2104,3 +2198,31 @@ int kvm_create_device(KVMState *s, uint64_t type, bool test)
 
     return test ? 0 : create_dev.fd;
 }
+
+int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uintptr_t) source;
+    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_set(id, strerror(r));
+    }
+    return r;
+}
+
+int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
+{
+    struct kvm_one_reg reg;
+    int r;
+
+    reg.id = id;
+    reg.addr = (uintptr_t) target;
+    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+    if (r) {
+        trace_kvm_failed_reg_get(id, strerror(r));
+    }
+    return r;
+}