struct KVMState
{
+ AccelState parent_obj;
+
KVMSlot *slots;
int nr_slots;
int fd;
#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
+#define KVM_STATE(obj) \
+ OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
+
KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
+bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
KVM_CAP_LAST_INFO
};
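
With AccelState embedded as its first field, KVMState is now a QOM object, and KVM_STATE() is the checked downcast from the generic accelerator pointer. A minimal sketch of how a caller recovers the concrete state (the helper name is hypothetical, shown only to illustrate the cast):

    /* Sketch, not part of the patch: kvm_state_from_machine() is a
     * hypothetical helper used only for illustration. */
    static KVMState *kvm_state_from_machine(MachineState *ms)
    {
        /* OBJECT_CHECK() validates the instance type at runtime and
         * aborts on a bogus cast. */
        return KVM_STATE(ms->accelerator);
    }
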
-static KVMSlot *kvm_alloc_slot(KVMState *s)
+static KVMSlot *kvm_get_free_slot(KVMState *s)
{
int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

+ return NULL;
+}
+
+bool kvm_has_free_slot(MachineState *ms)
+{
+ return kvm_get_free_slot(KVM_STATE(ms->accelerator));
+}
+
+static KVMSlot *kvm_alloc_slot(KVMState *s)
+{
+ KVMSlot *slot = kvm_get_free_slot(s);
+
+ if (slot) {
+ return slot;
+ }
+
fprintf(stderr, "%s: no free slot available\n", __func__);
abort();
}
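
Splitting the lookup out of kvm_alloc_slot() lets callers probe for memslot exhaustion without triggering the abort() path. A hedged sketch of how a hotplug path might use kvm_has_free_slot(); the function name and error handling are illustrative:

    /* Sketch, not part of the patch: refuse memory hotplug gracefully
     * instead of letting kvm_alloc_slot() abort() later. */
    static bool memory_device_can_plug(MachineState *ms, Error **errp)
    {
        if (kvm_enabled() && !kvm_has_free_slot(ms)) {
            error_setg(errp, "KVM has no free memory slots left");
            return false;
        }
        return true;
    }
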
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
- KVMDirtyLog d;
+ KVMDirtyLog d = {};
KVMSlot *mem;
int ret = 0;
hwaddr start_addr = section->offset_within_address_space;
unsigned delta;
/* kvm works in page size chunks, but the function may be called
- with sub-page size and unaligned start address. */
- delta = TARGET_PAGE_ALIGN(size) - size;
+ with sub-page size and unaligned start address. Pad the start
+ address to next and truncate size to previous page boundary. */
+ delta = (TARGET_PAGE_SIZE - (start_addr & ~TARGET_PAGE_MASK));
+ delta &= ~TARGET_PAGE_MASK;
if (delta > size) {
return;
}
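
A worked example of the new arithmetic, assuming 4 KiB target pages (TARGET_PAGE_SIZE = 0x1000, so ~TARGET_PAGE_MASK = 0xfff):

    /* Worked example, not part of the patch.
     *
     * Unaligned start:  start_addr = 0x2345
     *   start_addr & ~TARGET_PAGE_MASK = 0x345   (offset into the page)
     *   delta = 0x1000 - 0x345         = 0xcbb   (pad up to next page)
     *   delta &= ~TARGET_PAGE_MASK     -> 0xcbb  (unchanged, < page size)
     *
     * Aligned start:    start_addr = 0x2000
     *   start_addr & ~TARGET_PAGE_MASK = 0
     *   delta = 0x1000 - 0             = 0x1000
     *   delta &= ~TARGET_PAGE_MASK     -> 0      (no padding needed)
     */

The old delta = TARGET_PAGE_ALIGN(size) - size depended only on the size, so a misaligned start_addr slipped through uncorrected; the new form derives the padding from the start address itself.
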
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
+ if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+ kvm_irqchip_release_virq(s, virq);
+ return -EINVAL;
+ }
kvm_add_routing_entry(s, &kroute);
kvm_irqchip_commit_routes(s);
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
+ if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+ return -EINVAL;
+ }
return kvm_update_routing_entry(s, &kroute);
}
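
Both paths now pass the message through kvm_arch_fixup_msi_route(), giving each architecture a chance to rewrite the address/data pair before the entry reaches the kernel routing table. A minimal sketch of the hook for an architecture that needs no translation (the real implementations are per-target):

    /* Sketch, not part of the patch: a pass-through fixup hook.
     * Returning non-zero makes the callers above bail out with
     * -EINVAL (releasing the virq in the add case). */
    int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                                 uint64_t address, uint32_t data)
    {
        return 0;
    }
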
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
- struct kvm_irq_routing_entry kroute;
+ struct kvm_irq_routing_entry kroute = {};
int virq;
if (!kvm_gsi_routing_enabled()) {
int i, type = 0;
const char *kvm_type;
- s = g_malloc0(sizeof(KVMState));
+ s = KVM_STATE(ms->accelerator);
/*
* On systems where the kernel can support different base page
kvm_eventfds_allowed =
(kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
+ kvm_irqfds_allowed =
+ (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
+
+ kvm_resamplefds_allowed =
+ (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
+
ret = kvm_arch_init(s);
if (ret < 0) {
goto err;
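
The probed capabilities are consumed through predicate macros (kvm_irqfds_enabled(), kvm_resamplefds_enabled()) rather than read directly. A sketch of a typical consumer; the function name is hypothetical:

    /* Sketch, not part of the patch: level-triggered irqfds need a
     * second "resampler" eventfd that the kernel signals on EOI, so
     * both capabilities must be present. */
    static bool can_use_level_irqfd(void)
    {
        return kvm_irqfds_enabled() && kvm_resamplefds_enabled();
    }
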
close(s->fd);
}
g_free(s->slots);
- g_free(s);
return ret;
}
}
bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
- if (!bp) {
- return -ENOMEM;
- }
-
bp->pc = addr;
bp->use_count = 1;
err = kvm_arch_insert_sw_breakpoint(cpu, bp);
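
The deleted NULL check was dead code: unlike malloc(3), g_malloc() never returns NULL, it aborts the process on allocation failure, so the -ENOMEM branch could not be reached. A self-contained illustration:

    /* Sketch, not part of the patch: GLib's allocator either succeeds
     * or abort()s; use g_try_malloc() when failure must be handled. */
    #include <glib.h>

    void *alloc_example(size_t n)
    {
        return g_malloc(n);   /* no NULL check needed */
    }
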
.name = TYPE_KVM_ACCEL,
.parent = TYPE_ACCEL,
.class_init = kvm_accel_class_init,
+ .instance_size = sizeof(KVMState),
};
static void kvm_type_init(void)
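
Setting .instance_size to sizeof(KVMState) means QOM allocates the whole state when the accelerator object is created, which is why kvm_init() above switched from g_malloc0() to KVM_STATE(ms->accelerator) and dropped the g_free() on its error path. A sketch of the resulting lifecycle (helper name hypothetical):

    /* Sketch, not part of the patch: object_new() allocates
     * instance_size == sizeof(KVMState) bytes, so the AccelState and
     * the KVMState are one and the same allocation. */
    static AccelState *make_kvm_accel(void)
    {
        Object *obj = object_new(TYPE_KVM_ACCEL);
        KVMState *s = KVM_STATE(obj);   /* checked cast, same object */

        (void)s;                        /* fields are filled in by kvm_init() */
        return ACCEL(obj);
    }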