return kvm_update_routing_entry(s, &kroute);
}
-static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
+static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
+ bool assign)
{
struct kvm_irqfd irqfd = {
.fd = fd,
.gsi = virq,
.flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
};
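+ /* A resample fd makes the irqfd level-triggered: on guest EOI, KVM
+ * de-asserts the GSI and signals rfd so userspace can re-check the
+ * device and re-inject if the line is still asserted. */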
+ if (rfd != -1) {
+ irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
+ irqfd.resamplefd = rfd;
+ }
+
if (!kvm_irqfds_enabled()) {
return -ENOSYS;
}
}
#endif /* !KVM_CAP_IRQ_ROUTING */
-int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
+int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
+ EventNotifier *rn, int virq)
{
- return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
+ return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
+ rn ? event_notifier_get_fd(rn) : -1, virq, true);
}
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
- return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
+ return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
+ false);
}
static int kvm_irqchip_create(KVMState *s)
goto err;
}
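+ /* max_vcpus is assumed to hold the result of the earlier
+ * KVM_CAP_MAX_VCPUS / KVM_CAP_NR_VCPUS probe; max_cpus is the -smp
+ * maxcpus ceiling, so a hotplug range KVM cannot back is rejected
+ * up front. */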
+ if (max_cpus > max_vcpus) {
+ ret = -EINVAL;
+ fprintf(stderr, "Number of hotpluggable cpus requested (%d) exceeds max cpus "
+ "supported by KVM (%d)\n", max_cpus, max_vcpus);
+ goto err;
+ }
+
s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
if (s->vmfd < 0) {
#ifdef TARGET_S390X
uint8_t *ptr = data;
for (i = 0; i < count; i++) {
- if (direction == KVM_EXIT_IO_IN) {
- switch (size) {
- case 1:
- stb_p(ptr, cpu_inb(port));
- break;
- case 2:
- stw_p(ptr, cpu_inw(port));
- break;
- case 4:
- stl_p(ptr, cpu_inl(port));
- break;
- }
- } else {
- switch (size) {
- case 1:
- cpu_outb(port, ldub_p(ptr));
- break;
- case 2:
- cpu_outw(port, lduw_p(ptr));
- break;
- case 4:
- cpu_outl(port, ldl_p(ptr));
- break;
- }
- }
-
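+ /* Route the port access through the memory API: address_space_rw's
+ * final argument selects the direction (true = write, i.e. OUT). */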
+ address_space_rw(&address_space_io, port, ptr, size,
+ direction == KVM_EXIT_IO_OUT);
ptr += size;
}
}
&dbg_data->dbg);
}
-int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
- CPUState *cpu = ENV_GET_CPU(env);
struct kvm_set_guest_debug_data data;
data.dbg.control = reinject_trap;
- if (env->singlestep_enabled) {
+ if (cpu->singlestep_enabled) {
data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
}
kvm_arch_update_guest_debug(cpu, &data.dbg);
return data.err;
}
-int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
- CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp;
- CPUArchState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_cpu, addr);
+ bp = kvm_find_sw_breakpoint(cpu, addr);
if (bp) {
bp->use_count++;
return 0;
bp->pc = addr;
bp->use_count = 1;
- err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
+ err = kvm_arch_insert_sw_breakpoint(cpu, bp);
if (err) {
g_free(bp);
return err;
}
- QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
- bp, entry);
+ QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
} else {
err = kvm_arch_insert_hw_breakpoint(addr, len, type);
if (err) {
}
}
- for (env = first_cpu; env != NULL; env = env->next_cpu) {
- err = kvm_update_guest_debug(env, 0);
+ CPU_FOREACH(cpu) {
+ err = kvm_update_guest_debug(cpu, 0);
if (err) {
return err;
}
return 0;
}
-int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
- CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp;
- CPUArchState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_cpu, addr);
+ bp = kvm_find_sw_breakpoint(cpu, addr);
if (!bp) {
return -ENOENT;
}
return 0;
}
- err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
+ err = kvm_arch_remove_sw_breakpoint(cpu, bp);
if (err) {
return err;
}
- QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
+ QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
g_free(bp);
} else {
err = kvm_arch_remove_hw_breakpoint(addr, len, type);
}
}
- for (env = first_cpu; env != NULL; env = env->next_cpu) {
- err = kvm_update_guest_debug(env, 0);
+ CPU_FOREACH(cpu) {
+ err = kvm_update_guest_debug(cpu, 0);
if (err) {
return err;
}
return 0;
}
-void kvm_remove_all_breakpoints(CPUArchState *current_env)
+void kvm_remove_all_breakpoints(CPUState *cpu)
{
- CPUState *current_cpu = ENV_GET_CPU(current_env);
struct kvm_sw_breakpoint *bp, *next;
- KVMState *s = current_cpu->kvm_state;
- CPUArchState *env;
- CPUState *cpu;
+ KVMState *s = cpu->kvm_state;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
- if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
+ if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
- for (env = first_cpu; env != NULL; env = env->next_cpu) {
- cpu = ENV_GET_CPU(env);
+ CPU_FOREACH(cpu) {
if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
break;
}
}
kvm_arch_remove_all_hw_breakpoints();
- for (env = first_cpu; env != NULL; env = env->next_cpu) {
- kvm_update_guest_debug(env, 0);
+ CPU_FOREACH(cpu) {
+ kvm_update_guest_debug(cpu, 0);
}
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */
-int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
return -EINVAL;
}
-int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-void kvm_remove_all_breakpoints(CPUArchState *current_env)
+void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */