cpu->kvm_fd = ret;
cpu->kvm_state = s;
- cpu->kvm_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
- if (!cpu->kvm_vcpu_dirty) {
+ if (!cpu->vcpu_dirty) {
kvm_arch_get_registers(cpu);
- cpu->kvm_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
}
}
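/*
 * Cross-thread entry point: run_on_cpu() runs the helper above on the
 * vCPU's own thread, so registers are fetched from the kernel without
 * racing a concurrent KVM_RUN; the check below is just a fast path that
 * avoids the cross-thread call when the state is already dirty.
 */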
void kvm_cpu_synchronize_state(CPUState *cpu)
{
- if (!cpu->kvm_vcpu_dirty) {
+ if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
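/*
 * Throughout this file, vcpu_dirty == true means QEMU's CPUState copy of
 * the registers is authoritative and the kernel's copy is stale; only a
 * kvm_arch_put_registers() call clears it, as in the helpers below.
 */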
static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
- cpu->kvm_vcpu_dirty = false;
+ cpu->vcpu_dirty = false;
}
void kvm_cpu_synchronize_post_reset(CPUState *cpu)
static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
- cpu->kvm_vcpu_dirty = false;
+ cpu->vcpu_dirty = false;
}
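/*
 * The two put levels select how much state kvm_arch_put_registers() writes
 * back: the reset subset after a system reset, the full state (including
 * init-only state) after machine init. Either way the kernel copy ends up
 * current, hence vcpu_dirty = false.
 */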
void kvm_cpu_synchronize_post_init(CPUState *cpu)
static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
- cpu->kvm_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
}
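/*
 * Before loadvm nothing is fetched from KVM: the incoming snapshot is about
 * to overwrite CPUState anyway, so marking it dirty is enough to make the
 * snapshot contents win on the next entry to the guest.
 */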
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
do {
MemTxAttrs attrs;
- if (cpu->kvm_vcpu_dirty) {
+ if (cpu->vcpu_dirty) {
kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
- cpu->kvm_vcpu_dirty = false;
+ cpu->vcpu_dirty = false;
}
kvm_arch_pre_run(cpu, run);
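/* The loop issues the KVM_RUN ioctl next, so the write-back above is the
 * last chance for QEMU-side register changes to reach the kernel. */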
vaddr mem_io_vaddr;
int kvm_fd;
- bool kvm_vcpu_dirty;
struct KVMState *kvm_state;
struct kvm_run *kvm_run;
uint32_t can_do_io;
int32_t exception_index;
+ /* shared by kvm, hax and hvf */
+ bool vcpu_dirty;
+
/* Used to keep track of an outstanding cpu throttle thread for migration
* autoconverge
*/
icount_decr_u16 u16;
} icount_decr;
- bool hax_vcpu_dirty;
struct hax_vcpu_state *hax_vcpu;
/* The pending_tlb_flush flag is set and cleared atomically to
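/*
 * Sketch (not part of this patch): with the flag hoisted into CPUState, a
 * new accelerator such as hvf can reuse the same pattern verbatim;
 * hvf_get_registers() is a hypothetical placeholder name:
 *
 *     static void do_hvf_cpu_synchronize_state(CPUState *cpu,
 *                                              run_on_cpu_data arg)
 *     {
 *         if (!cpu->vcpu_dirty) {
 *             hvf_get_registers(cpu);    // hypothetical accessor
 *             cpu->vcpu_dirty = true;
 *         }
 *     }
 */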
}
cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
- cpu->hax_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
return ret;
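/* As on the KVM side, a freshly created vCPU starts out dirty so that
 * QEMU's initial register state is pushed to HAX before the first run. */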
CPUArchState *env = cpu->env_ptr;
hax_arch_get_registers(env);
- cpu->hax_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
}
void hax_cpu_synchronize_state(CPUState *cpu)
{
- if (!cpu->hax_vcpu_dirty) {
+ if (!cpu->vcpu_dirty) {
run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
}
}
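/* Same shape as kvm_cpu_synchronize_state() above; only the arch accessors
 * differ, which is exactly what lets the two accelerators share one flag. */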
CPUArchState *env = cpu->env_ptr;
hax_vcpu_sync_state(env, 1);
- cpu->hax_vcpu_dirty = false;
+ cpu->vcpu_dirty = false;
}
void hax_cpu_synchronize_post_reset(CPUState *cpu)
CPUArchState *env = cpu->env_ptr;
hax_vcpu_sync_state(env, 1);
- cpu->hax_vcpu_dirty = false;
+ cpu->vcpu_dirty = false;
}
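/* hax_vcpu_sync_state(env, 1) pushes QEMU's state into HAX, the HAX
 * counterpart of kvm_arch_put_registers(); hence the flag is cleared. */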
void hax_cpu_synchronize_post_init(CPUState *cpu)
static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
- cpu->hax_vcpu_dirty = true;
+ cpu->vcpu_dirty = true;
}
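/* Same pre-loadvm convention as KVM: mark the state dirty without reading
 * anything back, so the snapshot about to be loaded wins. */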
void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
* already saved and can be restored when it is synced back to KVM.
*/
if (!running) {
- if (!cs->kvm_vcpu_dirty) {
+ if (!cs->vcpu_dirty) {
ret = kvm_mips_save_count(cs);
if (ret < 0) {
fprintf(stderr, "Failed saving count\n");
return;
}
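/* The count restore below is skipped while vcpu_dirty is set: the timer
 * state already lives in CPUState and returns to KVM with the next sync. */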
- if (!cs->kvm_vcpu_dirty) {
+ if (!cs->vcpu_dirty) {
ret = kvm_mips_restore_count(cs);
if (ret < 0) {
fprintf(stderr, "Failed restoring count\n");