2 * QEMU Windows Hypervisor Platform accelerator (WHPX)
4 * Copyright Microsoft Corp. 2017
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/ioport.h"
15 #include "qemu-common.h"
16 #include "sysemu/accel.h"
17 #include "sysemu/whpx.h"
18 #include "sysemu/cpus.h"
19 #include "sysemu/runstate.h"
20 #include "qemu/main-loop.h"
21 #include "hw/boards.h"
22 #include "qemu/error-report.h"
23 #include "qapi/error.h"
24 #include "migration/blocker.h"
25 #include "whp-dispatch.h"
27 #include <WinHvPlatform.h>
28 #include <WinHvEmulation.h>
32 WHV_PARTITION_HANDLE partition;
35 static const WHV_REGISTER_NAME whpx_register_names[] = {
37 /* X64 General purpose registers */
57 /* X64 Segment registers */
67 /* X64 Table registers */
71 /* X64 Control Registers */
78 /* X64 Debug Registers */
88 /* X64 Floating Point and Vector Registers */
105 WHvX64RegisterFpMmx0,
106 WHvX64RegisterFpMmx1,
107 WHvX64RegisterFpMmx2,
108 WHvX64RegisterFpMmx3,
109 WHvX64RegisterFpMmx4,
110 WHvX64RegisterFpMmx5,
111 WHvX64RegisterFpMmx6,
112 WHvX64RegisterFpMmx7,
113 WHvX64RegisterFpControlStatus,
114 WHvX64RegisterXmmControlStatus,
119 WHvX64RegisterKernelGsBase,
121 WHvX64RegisterApicBase,
122 /* WHvX64RegisterPat, */
123 WHvX64RegisterSysenterCs,
124 WHvX64RegisterSysenterEip,
125 WHvX64RegisterSysenterEsp,
130 WHvX64RegisterSfmask,
133 /* Interrupt / Event Registers */
135 * WHvRegisterPendingInterruption,
136 * WHvRegisterInterruptState,
137 * WHvRegisterPendingEvent0,
138 * WHvRegisterPendingEvent1
139 * WHvX64RegisterDeliverabilityNotifications,
143 struct whpx_register_set {
144 WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
148 WHV_EMULATOR_HANDLE emulator;
149 bool window_registered;
153 bool interruption_pending;
155 /* Must be the last field as it may have a tail */
156 WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
159 static bool whpx_allowed;
160 static bool whp_dispatch_initialized;
161 static HMODULE hWinHvPlatform, hWinHvEmulation;
163 struct whpx_state whpx_global;
164 struct WHPDispatch whp_dispatch;
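/*
 * The per-vCPU WHPX state hangs off CPUState::hax_vcpu; the field name is
 * inherited from the HAXM accelerator and simply reused here.
 */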
171 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
173 return (struct whpx_vcpu *)cpu->hax_vcpu;
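/*
 * Translate a QEMU SegmentCache into the WHV_X64_SEGMENT_REGISTER layout.
 * In virtual-8086 mode the attributes are synthesized rather than taken
 * from the cached descriptor flags.
 */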
176 static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
179 WHV_X64_SEGMENT_REGISTER hs;
180 unsigned flags = qs->flags;
183 hs.Limit = qs->limit;
184 hs.Selector = qs->selector;
190 hs.DescriptorPrivilegeLevel = 3;
191 hs.NonSystemSegment = 1;
194 hs.Attributes = (flags >> DESC_TYPE_SHIFT);
197 /* hs.Base &= 0xfffff; */
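/* Convert a WHV_X64_SEGMENT_REGISTER back into QEMU's SegmentCache form. */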
204 static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
209 qs.limit = hs->Limit;
210 qs.selector = hs->Selector;
212 qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
217 static int whpx_set_tsc(CPUState *cpu)
219 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
220 WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
221 WHV_REGISTER_VALUE tsc_val;
223 struct whpx_state *whpx = &whpx_global;
226 * Suspend the partition prior to setting the TSC to reduce the variance
227 * in TSC across vCPUs. When the first vCPU runs post suspend, the
228 * partition is automatically resumed.
230 if (whp_dispatch.WHvSuspendPartitionTime) {
233 * Failing to suspend the partition before setting the TSC is not a
234 * fatal error. It only increases the likelihood of TSC variance
235 * between vCPUs, and some guest OSes can handle that just fine.
237 hr = whp_dispatch.WHvSuspendPartitionTime(whpx->partition);
239 warn_report("WHPX: Failed to suspend partition, hr=%08lx", hr);
243 tsc_val.Reg64 = env->tsc;
244 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
245 whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
247 error_report("WHPX: Failed to set TSC, hr=%08lx", hr);
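/*
 * Push the QEMU register state into the partition with a single
 * WHvSetVirtualProcessorRegisters() call. 'level' controls whether the
 * heavyweight MSRs (currently the TSC) are written as well.
 */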
254 static void whpx_set_registers(CPUState *cpu, int level)
256 struct whpx_state *whpx = &whpx_global;
257 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
258 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
259 X86CPU *x86_cpu = X86_CPU(cpu);
260 struct whpx_register_set vcxt;
267 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
270 * The following MSRs have side effects on the guest or are too costly
271 * to update at runtime; limit them to the full state update.
273 if (level >= WHPX_SET_RESET_STATE) {
277 memset(&vcxt, 0, sizeof(struct whpx_register_set));
279 v86 = (env->eflags & VM_MASK);
280 r86 = !(env->cr[0] & CR0_PE_MASK);
282 vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
283 vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
287 /* Indexes for first 16 registers match between HV and QEMU definitions */
289 for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
290 vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];
294 /* Same goes for RIP and RFLAGS */
295 assert(whpx_register_names[idx] == WHvX64RegisterRip);
296 vcxt.values[idx++].Reg64 = env->eip;
298 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
299 vcxt.values[idx++].Reg64 = env->eflags;
301 /* Translate 6+4 segment registers. HV and QEMU order matches */
302 assert(idx == WHvX64RegisterEs);
303 for (i = 0; i < 6; i += 1, idx += 1) {
304 vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
307 assert(idx == WHvX64RegisterLdtr);
308 vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
310 assert(idx == WHvX64RegisterTr);
311 vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
313 assert(idx == WHvX64RegisterIdtr);
314 vcxt.values[idx].Table.Base = env->idt.base;
315 vcxt.values[idx].Table.Limit = env->idt.limit;
318 assert(idx == WHvX64RegisterGdtr);
319 vcxt.values[idx].Table.Base = env->gdt.base;
320 vcxt.values[idx].Table.Limit = env->gdt.limit;
323 /* CR0, 2, 3, 4, 8 */
324 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
325 vcxt.values[idx++].Reg64 = env->cr[0];
326 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
327 vcxt.values[idx++].Reg64 = env->cr[2];
328 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
329 vcxt.values[idx++].Reg64 = env->cr[3];
330 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
331 vcxt.values[idx++].Reg64 = env->cr[4];
332 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
333 vcxt.values[idx++].Reg64 = vcpu->tpr;
335 /* 8 Debug Registers - Skipped */
337 /* 16 XMM registers */
338 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
340 for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
341 vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
342 vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
347 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
348 for (i = 0; i < 8; i += 1, idx += 1) {
349 vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
350 /* vcxt.values[idx].Fp.AsUINT128.High64 =
351 env->fpregs[i].mmx.MMX_Q(1);
355 /* FP control status register */
356 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
357 vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
358 vcxt.values[idx].FpControlStatus.FpStatus =
359 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
360 vcxt.values[idx].FpControlStatus.FpTag = 0;
361 for (i = 0; i < 8; ++i) {
362 vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
364 vcxt.values[idx].FpControlStatus.Reserved = 0;
365 vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
366 vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
369 /* XMM control status register */
370 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
371 vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
372 vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
373 vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
377 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
378 vcxt.values[idx++].Reg64 = env->efer;
380 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
381 vcxt.values[idx++].Reg64 = env->kernelgsbase;
384 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
385 vcxt.values[idx++].Reg64 = vcpu->apic_base;
387 /* WHvX64RegisterPat - Skipped */
389 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
390 vcxt.values[idx++].Reg64 = env->sysenter_cs;
391 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
392 vcxt.values[idx++].Reg64 = env->sysenter_eip;
393 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
394 vcxt.values[idx++].Reg64 = env->sysenter_esp;
395 assert(whpx_register_names[idx] == WHvX64RegisterStar);
396 vcxt.values[idx++].Reg64 = env->star;
398 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
399 vcxt.values[idx++].Reg64 = env->lstar;
400 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
401 vcxt.values[idx++].Reg64 = env->cstar;
402 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
403 vcxt.values[idx++].Reg64 = env->fmask;
406 /* Interrupt / Event Registers - Skipped */
408 assert(idx == RTL_NUMBER_OF(whpx_register_names));
410 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
411 whpx->partition, cpu->cpu_index,
413 RTL_NUMBER_OF(whpx_register_names),
417 error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
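/* Read the guest TSC back from the partition into env->tsc. */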
424 static int whpx_get_tsc(CPUState *cpu)
426 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
427 WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
428 WHV_REGISTER_VALUE tsc_val;
430 struct whpx_state *whpx = &whpx_global;
432 hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
433 whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
435 error_report("WHPX: Failed to get TSC, hr=%08lx", hr);
439 env->tsc = tsc_val.Reg64;
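/*
 * Pull the register state out of the partition into QEMU's CPUX86State,
 * propagating TPR and APIC base changes to the APIC device model.
 */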
443 static void whpx_get_registers(CPUState *cpu)
445 struct whpx_state *whpx = &whpx_global;
446 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
447 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
448 X86CPU *x86_cpu = X86_CPU(cpu);
449 struct whpx_register_set vcxt;
450 uint64_t tpr, apic_base;
456 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
458 if (!env->tsc_valid) {
460 env->tsc_valid = !runstate_is_running();
463 hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
464 whpx->partition, cpu->cpu_index,
466 RTL_NUMBER_OF(whpx_register_names),
469 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
475 /* Indexes for first 16 registers match between HV and QEMU definitions */
477 for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
478 env->regs[idx] = vcxt.values[idx].Reg64;
482 /* Same goes for RIP and RFLAGS */
483 assert(whpx_register_names[idx] == WHvX64RegisterRip);
484 env->eip = vcxt.values[idx++].Reg64;
485 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
486 env->eflags = vcxt.values[idx++].Reg64;
488 /* Translate 6+4 segment registers. HV and QEMU order matches */
489 assert(idx == WHvX64RegisterEs);
490 for (i = 0; i < 6; i += 1, idx += 1) {
491 env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
494 assert(idx == WHvX64RegisterLdtr);
495 env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
496 assert(idx == WHvX64RegisterTr);
497 env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
498 assert(idx == WHvX64RegisterIdtr);
499 env->idt.base = vcxt.values[idx].Table.Base;
500 env->idt.limit = vcxt.values[idx].Table.Limit;
502 assert(idx == WHvX64RegisterGdtr);
503 env->gdt.base = vcxt.values[idx].Table.Base;
504 env->gdt.limit = vcxt.values[idx].Table.Limit;
507 /* CR0, 2, 3, 4, 8 */
508 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
509 env->cr[0] = vcxt.values[idx++].Reg64;
510 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
511 env->cr[2] = vcxt.values[idx++].Reg64;
512 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
513 env->cr[3] = vcxt.values[idx++].Reg64;
514 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
515 env->cr[4] = vcxt.values[idx++].Reg64;
516 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
517 tpr = vcxt.values[idx++].Reg64;
518 if (tpr != vcpu->tpr) {
520 cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
523 /* 8 Debug Registers - Skipped */
525 /* 16 XMM registers */
526 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
528 for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
529 env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
530 env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
535 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
536 for (i = 0; i < 8; i += 1, idx += 1) {
537 env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
538 /* env->fpregs[i].mmx.MMX_Q(1) =
539 vcxt.values[idx].Fp.AsUINT128.High64;
543 /* FP control status register */
544 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
545 env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
546 env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
547 env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
548 for (i = 0; i < 8; ++i) {
549 env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
551 env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
552 env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
555 /* XMM control status register */
556 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
557 env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
561 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
562 env->efer = vcxt.values[idx++].Reg64;
564 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
565 env->kernelgsbase = vcxt.values[idx++].Reg64;
568 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
569 apic_base = vcxt.values[idx++].Reg64;
570 if (apic_base != vcpu->apic_base) {
571 vcpu->apic_base = apic_base;
572 cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
575 /* WHvX64RegisterPat - Skipped */
577 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
578 env->sysenter_cs = vcxt.values[idx++].Reg64;
579 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
580 env->sysenter_eip = vcxt.values[idx++].Reg64;
581 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
582 env->sysenter_esp = vcxt.values[idx++].Reg64;
583 assert(whpx_register_names[idx] == WHvX64RegisterStar);
584 env->star = vcxt.values[idx++].Reg64;
586 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
587 env->lstar = vcxt.values[idx++].Reg64;
588 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
589 env->cstar = vcxt.values[idx++].Reg64;
590 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
591 env->fmask = vcxt.values[idx++].Reg64;
594 /* Interrupt / Event Registers - Skipped */
596 assert(idx == RTL_NUMBER_OF(whpx_register_names));
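/*
 * Callbacks used by the WinHvEmulation instruction emulator to complete
 * port I/O and MMIO accesses and to read, write and translate registers
 * on behalf of the guest.
 */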
601 static HRESULT CALLBACK whpx_emu_ioport_callback(
603 WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
605 MemTxAttrs attrs = { 0 };
606 address_space_rw(&address_space_io, IoAccess->Port, attrs,
607 &IoAccess->Data, IoAccess->AccessSize,
608 IoAccess->Direction);
612 static HRESULT CALLBACK whpx_emu_mmio_callback(
614 WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
616 cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
621 static HRESULT CALLBACK whpx_emu_getreg_callback(
623 const WHV_REGISTER_NAME *RegisterNames,
624 UINT32 RegisterCount,
625 WHV_REGISTER_VALUE *RegisterValues)
628 struct whpx_state *whpx = &whpx_global;
629 CPUState *cpu = (CPUState *)ctx;
631 hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
632 whpx->partition, cpu->cpu_index,
633 RegisterNames, RegisterCount,
636 error_report("WHPX: Failed to get virtual processor registers,"
643 static HRESULT CALLBACK whpx_emu_setreg_callback(
645 const WHV_REGISTER_NAME *RegisterNames,
646 UINT32 RegisterCount,
647 const WHV_REGISTER_VALUE *RegisterValues)
650 struct whpx_state *whpx = &whpx_global;
651 CPUState *cpu = (CPUState *)ctx;
653 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
654 whpx->partition, cpu->cpu_index,
655 RegisterNames, RegisterCount,
658 error_report("WHPX: Failed to set virtual processor registers,"
663 * The emulator just successfully wrote the register state. Clear the
664 * dirty flag so the state is not written again when the VP resumes.
666 cpu->vcpu_dirty = false;
671 static HRESULT CALLBACK whpx_emu_translate_callback(
673 WHV_GUEST_VIRTUAL_ADDRESS Gva,
674 WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
675 WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
676 WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
679 struct whpx_state *whpx = &whpx_global;
680 CPUState *cpu = (CPUState *)ctx;
681 WHV_TRANSLATE_GVA_RESULT res;
683 hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
684 Gva, TranslateFlags, &res, Gpa);
686 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
688 *TranslationResult = res.ResultCode;
694 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
695 .Size = sizeof(WHV_EMULATOR_CALLBACKS),
696 .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
697 .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
698 .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
699 .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
700 .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
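/* Complete an MMIO access by emulating the faulting instruction. */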
703 static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
706 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
707 WHV_EMULATOR_STATUS emu_status;
709 hr = whp_dispatch.WHvEmulatorTryMmioEmulation(
711 &vcpu->exit_ctx.VpContext, ctx,
714 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
718 if (!emu_status.EmulationSuccessful) {
719 error_report("WHPX: Failed to emulate MMIO access with"
720 " EmulatorReturnStatus: %u", emu_status.AsUINT32);
727 static int whpx_handle_portio(CPUState *cpu,
728 WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
731 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
732 WHV_EMULATOR_STATUS emu_status;
734 hr = whp_dispatch.WHvEmulatorTryIoEmulation(
736 &vcpu->exit_ctx.VpContext, ctx,
739 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
743 if (!emu_status.EmulationSuccessful) {
744 error_report("WHPX: Failed to emulate PortIO access with"
745 " EmulatorReturnStatus: %u", emu_status.AsUINT32);
752 static int whpx_handle_halt(CPUState *cpu)
754 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
757 qemu_mutex_lock_iothread();
758 if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
759 (env->eflags & IF_MASK)) &&
760 !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
761 cpu->exception_index = EXCP_HLT;
765 qemu_mutex_unlock_iothread();
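/*
 * Prepare the vCPU for re-entry into the hypervisor: inject any pending
 * NMI or external interrupt, sync the TPR into CR8, and request an
 * interrupt-window exit when an interrupt cannot be injected yet.
 */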
770 static void whpx_vcpu_pre_run(CPUState *cpu)
773 struct whpx_state *whpx = &whpx_global;
774 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
775 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
776 X86CPU *x86_cpu = X86_CPU(cpu);
779 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
780 UINT32 reg_count = 0;
781 WHV_REGISTER_VALUE reg_values[3];
782 WHV_REGISTER_NAME reg_names[3];
784 memset(&new_int, 0, sizeof(new_int));
785 memset(reg_values, 0, sizeof(reg_values));
787 qemu_mutex_lock_iothread();
790 if (!vcpu->interruption_pending &&
791 cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
792 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
793 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
794 vcpu->interruptable = false;
795 new_int.InterruptionType = WHvX64PendingNmi;
796 new_int.InterruptionPending = 1;
797 new_int.InterruptionVector = 2;
799 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
800 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
805 * Force the VCPU out of its inner loop to process any INIT requests or
806 * commit pending TPR access.
808 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
809 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
810 !(env->hflags & HF_SMM_MASK)) {
811 cpu->exit_request = 1;
813 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
814 cpu->exit_request = 1;
818 /* Get pending hard interruption or replay one that was overwritten */
819 if (!vcpu->interruption_pending &&
820 vcpu->interruptable && (env->eflags & IF_MASK)) {
821 assert(!new_int.InterruptionPending);
822 if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
823 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
824 irq = cpu_get_pic_interrupt(env);
826 new_int.InterruptionType = WHvX64PendingInterrupt;
827 new_int.InterruptionPending = 1;
828 new_int.InterruptionVector = irq;
833 /* Set up the interrupt state if a new one was prepared */
834 if (new_int.InterruptionPending) {
835 reg_values[reg_count].PendingInterruption = new_int;
836 reg_names[reg_count] = WHvRegisterPendingInterruption;
840 /* Sync the TPR to CR8 if it was modified during the intercept */
841 tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
842 if (tpr != vcpu->tpr) {
844 reg_values[reg_count].Reg64 = tpr;
845 cpu->exit_request = 1;
846 reg_names[reg_count] = WHvX64RegisterCr8;
850 /* Update the state of the interrupt delivery notification */
851 if (!vcpu->window_registered &&
852 cpu->interrupt_request & CPU_INTERRUPT_HARD) {
853 reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
855 vcpu->window_registered = 1;
856 reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
860 qemu_mutex_unlock_iothread();
863 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
864 whpx->partition, cpu->cpu_index,
865 reg_names, reg_count, reg_values);
867 error_report("WHPX: Failed to set interrupt state registers,"
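/*
 * Fold RFLAGS and CR8 from the exit context back into QEMU state and
 * record the interruptibility reported by the hypervisor.
 */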
875 static void whpx_vcpu_post_run(CPUState *cpu)
877 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
878 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
879 X86CPU *x86_cpu = X86_CPU(cpu);
881 env->eflags = vcpu->exit_ctx.VpContext.Rflags;
883 uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
884 if (vcpu->tpr != tpr) {
886 qemu_mutex_lock_iothread();
887 cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
888 qemu_mutex_unlock_iothread();
891 vcpu->interruption_pending =
892 vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;
894 vcpu->interruptable =
895 !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
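/*
 * Service INIT, SIPI, APIC poll and TPR-access requests, and wake a halted
 * vCPU once an interrupt becomes deliverable.
 */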
900 static void whpx_vcpu_process_async_events(CPUState *cpu)
902 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
903 X86CPU *x86_cpu = X86_CPU(cpu);
904 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
906 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
907 !(env->hflags & HF_SMM_MASK)) {
908 whpx_cpu_synchronize_state(cpu);
909 do_cpu_init(x86_cpu);
910 vcpu->interruptable = true;
913 if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
914 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
915 apic_poll_irq(x86_cpu->apic_state);
918 if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
919 (env->eflags & IF_MASK)) ||
920 (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
924 if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
925 whpx_cpu_synchronize_state(cpu);
926 do_cpu_sipi(x86_cpu);
929 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
930 cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
931 whpx_cpu_synchronize_state(cpu);
932 apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
933 env->tpr_access_type);
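/*
 * Inner execution loop for a vCPU: flush dirty registers, run the virtual
 * processor and dispatch on the exit reason until a return to the QEMU
 * main loop is requested.
 */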
939 static int whpx_vcpu_run(CPUState *cpu)
942 struct whpx_state *whpx = &whpx_global;
943 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
946 whpx_vcpu_process_async_events(cpu);
948 cpu->exception_index = EXCP_HLT;
949 atomic_set(&cpu->exit_request, false);
953 qemu_mutex_unlock_iothread();
957 if (cpu->vcpu_dirty) {
958 whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE);
959 cpu->vcpu_dirty = false;
962 whpx_vcpu_pre_run(cpu);
964 if (atomic_read(&cpu->exit_request)) {
968 hr = whp_dispatch.WHvRunVirtualProcessor(
969 whpx->partition, cpu->cpu_index,
970 &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));
973 error_report("WHPX: Failed to exec a virtual processor,"
979 whpx_vcpu_post_run(cpu);
981 switch (vcpu->exit_ctx.ExitReason) {
982 case WHvRunVpExitReasonMemoryAccess:
983 ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
986 case WHvRunVpExitReasonX64IoPortAccess:
987 ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
990 case WHvRunVpExitReasonX64InterruptWindow:
991 vcpu->window_registered = 0;
995 case WHvRunVpExitReasonX64Halt:
996 ret = whpx_handle_halt(cpu);
999 case WHvRunVpExitReasonCanceled:
1000 cpu->exception_index = EXCP_INTERRUPT;
1004 case WHvRunVpExitReasonX64MsrAccess: {
1005 WHV_REGISTER_VALUE reg_values[3] = {0};
1006 WHV_REGISTER_NAME reg_names[3];
1009 reg_names[0] = WHvX64RegisterRip;
1010 reg_names[1] = WHvX64RegisterRax;
1011 reg_names[2] = WHvX64RegisterRdx;
1013 reg_values[0].Reg64 =
1014 vcpu->exit_ctx.VpContext.Rip +
1015 vcpu->exit_ctx.VpContext.InstructionLength;
1018 * For all unsupported MSR access we:
1022 reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?
1025 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
1028 reg_names, reg_count,
1032 error_report("WHPX: Failed to set MsrAccess state"
1033 " registers, hr=%08lx", hr);
1038 case WHvRunVpExitReasonX64Cpuid: {
1039 WHV_REGISTER_VALUE reg_values[5];
1040 WHV_REGISTER_NAME reg_names[5];
1041 UINT32 reg_count = 5;
1042 UINT64 cpuid_fn, rip = 0, rax = 0, rcx = 0, rdx = 0, rbx = 0;
1043 X86CPU *x86_cpu = X86_CPU(cpu);
1044 CPUX86State *env = &x86_cpu->env;
1046 memset(reg_values, 0, sizeof(reg_values));
1048 rip = vcpu->exit_ctx.VpContext.Rip +
1049 vcpu->exit_ctx.VpContext.InstructionLength;
1050 cpuid_fn = vcpu->exit_ctx.CpuidAccess.Rax;
1053 * Ideally, these should be supplied to the hypervisor during VCPU
1054 * initialization and it should be able to satisfy this request.
1055 * But, currently, WHPX doesn't support setting CPUID values in the
1056 * hypervisor once the partition has been set up, which is too late
1057 * since VCPUs are realized later. For now, use the values from
1058 * QEMU to satisfy these requests, until WHPX adds support for
1059 * setting these values in the hypervisor at runtime.
1061 cpu_x86_cpuid(env, cpuid_fn, 0, (UINT32 *)&rax, (UINT32 *)&rbx,
1062 (UINT32 *)&rcx, (UINT32 *)&rdx);
1065 /* Remove any support for OSVW */
1066 rcx &= ~CPUID_EXT3_OSVW;
1070 reg_names[0] = WHvX64RegisterRip;
1071 reg_names[1] = WHvX64RegisterRax;
1072 reg_names[2] = WHvX64RegisterRcx;
1073 reg_names[3] = WHvX64RegisterRdx;
1074 reg_names[4] = WHvX64RegisterRbx;
1076 reg_values[0].Reg64 = rip;
1077 reg_values[1].Reg64 = rax;
1078 reg_values[2].Reg64 = rcx;
1079 reg_values[3].Reg64 = rdx;
1080 reg_values[4].Reg64 = rbx;
1082 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
1083 whpx->partition, cpu->cpu_index,
1089 error_report("WHPX: Failed to set CpuidAccess state registers,"
1095 case WHvRunVpExitReasonNone:
1096 case WHvRunVpExitReasonUnrecoverableException:
1097 case WHvRunVpExitReasonInvalidVpRegisterValue:
1098 case WHvRunVpExitReasonUnsupportedFeature:
1099 case WHvRunVpExitReasonException:
1101 error_report("WHPX: Unexpected VP exit code %d",
1102 vcpu->exit_ctx.ExitReason);
1103 whpx_get_registers(cpu);
1104 qemu_mutex_lock_iothread();
1105 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
1106 qemu_mutex_unlock_iothread();
1113 qemu_mutex_lock_iothread();
1116 atomic_set(&cpu->exit_request, false);
1121 static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
1123 if (!cpu->vcpu_dirty) {
1124 whpx_get_registers(cpu);
1125 cpu->vcpu_dirty = true;
1129 static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
1130 run_on_cpu_data arg)
1132 whpx_set_registers(cpu, WHPX_SET_RESET_STATE);
1133 cpu->vcpu_dirty = false;
1136 static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
1137 run_on_cpu_data arg)
1139 whpx_set_registers(cpu, WHPX_SET_FULL_STATE);
1140 cpu->vcpu_dirty = false;
1143 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
1144 run_on_cpu_data arg)
1146 cpu->vcpu_dirty = true;
1153 void whpx_cpu_synchronize_state(CPUState *cpu)
1155 if (!cpu->vcpu_dirty) {
1156 run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
1160 void whpx_cpu_synchronize_post_reset(CPUState *cpu)
1162 run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1165 void whpx_cpu_synchronize_post_init(CPUState *cpu)
1167 run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1170 void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
1172 run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1179 static Error *whpx_migration_blocker;
1181 static void whpx_cpu_update_state(void *opaque, int running, RunState state)
1183 CPUX86State *env = opaque;
1186 env->tsc_valid = false;
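/*
 * Create the per-vCPU WHPX state: register the migration blocker, create
 * the instruction emulator and the virtual processor in the partition.
 */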
1190 int whpx_init_vcpu(CPUState *cpu)
1193 struct whpx_state *whpx = &whpx_global;
1194 struct whpx_vcpu *vcpu;
1195 Error *local_error = NULL;
1197 /* Add migration blockers for all unsupported features of the
1198 * Windows Hypervisor Platform
1200 if (whpx_migration_blocker == NULL) {
1201 error_setg(&whpx_migration_blocker,
1202 "State blocked due to non-migratable CPUID feature support,"
1203 "dirty memory tracking support, and XSAVE/XRSTOR support");
1205 (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1207 error_report_err(local_error);
1208 migrate_del_blocker(whpx_migration_blocker);
1209 error_free(whpx_migration_blocker);
1214 vcpu = g_malloc0(sizeof(struct whpx_vcpu));
1217 error_report("WHPX: Failed to allocate VCPU context.");
1221 hr = whp_dispatch.WHvEmulatorCreateEmulator(
1222 &whpx_emu_callbacks,
1225 error_report("WHPX: Failed to set up instruction completion support,"
1231 hr = whp_dispatch.WHvCreateVirtualProcessor(
1232 whpx->partition, cpu->cpu_index, 0);
1234 error_report("WHPX: Failed to create a virtual processor,"
1236 whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
1241 vcpu->interruptable = true;
1243 cpu->vcpu_dirty = true;
1244 cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1245 qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
1250 int whpx_vcpu_exec(CPUState *cpu)
1256 if (cpu->exception_index >= EXCP_INTERRUPT) {
1257 ret = cpu->exception_index;
1258 cpu->exception_index = -1;
1262 fatal = whpx_vcpu_run(cpu);
1265 error_report("WHPX: Failed to exec a virtual processor");
1273 void whpx_destroy_vcpu(CPUState *cpu)
1275 struct whpx_state *whpx = &whpx_global;
1276 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
1278 whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
1279 whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
1280 g_free(cpu->hax_vcpu);
1284 void whpx_vcpu_kick(CPUState *cpu)
1286 struct whpx_state *whpx = &whpx_global;
1287 whp_dispatch.WHvCancelRunVirtualProcessor(
1288 whpx->partition, cpu->cpu_index, 0);
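/*
 * Map or unmap a host virtual address range into the guest physical
 * address space using WHvMapGpaRange()/WHvUnmapGpaRange().
 */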
1295 static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
1296 void *host_va, int add, int rom,
1299 struct whpx_state *whpx = &whpx_global;
1304 printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1305 (void*)start_pa, (void*)size, host_va,
1306 (rom ? "ROM" : "RAM"), name);
1308 printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
1309 (void*)start_pa, (void*)size, host_va, name);
1314 hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
1318 (WHvMapGpaRangeFlagRead |
1319 WHvMapGpaRangeFlagExecute |
1320 (rom ? 0 : WHvMapGpaRangeFlagWrite)));
1322 hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,
1328 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1329 " Host:%p, hr=%08lx",
1330 (add ? "MAP" : "UNMAP"), name,
1331 (void *)(uintptr_t)start_pa, (void *)size, host_va, hr);
1335 static void whpx_process_section(MemoryRegionSection *section, int add)
1337 MemoryRegion *mr = section->mr;
1338 hwaddr start_pa = section->offset_within_address_space;
1339 ram_addr_t size = int128_get64(section->size);
1343 if (!memory_region_is_ram(mr)) {
1347 delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
1348 delta &= ~qemu_real_host_page_mask;
1354 size &= qemu_real_host_page_mask;
1355 if (!size || (start_pa & ~qemu_real_host_page_mask)) {
1359 host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
1360 + section->offset_within_region + delta;
1362 whpx_update_mapping(start_pa, size, (void *)(uintptr_t)host_va, add,
1363 memory_region_is_rom(mr), mr->name);
1366 static void whpx_region_add(MemoryListener *listener,
1367 MemoryRegionSection *section)
1369 memory_region_ref(section->mr);
1370 whpx_process_section(section, 1);
1373 static void whpx_region_del(MemoryListener *listener,
1374 MemoryRegionSection *section)
1376 whpx_process_section(section, 0);
1377 memory_region_unref(section->mr);
1380 static void whpx_transaction_begin(MemoryListener *listener)
1384 static void whpx_transaction_commit(MemoryListener *listener)
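/*
 * WHPX provides no dirty-page tracking, so every log sync conservatively
 * marks the whole RAM section dirty.
 */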
1388 static void whpx_log_sync(MemoryListener *listener,
1389 MemoryRegionSection *section)
1391 MemoryRegion *mr = section->mr;
1393 if (!memory_region_is_ram(mr)) {
1397 memory_region_set_dirty(mr, 0, int128_get64(section->size));
1400 static MemoryListener whpx_memory_listener = {
1401 .begin = whpx_transaction_begin,
1402 .commit = whpx_transaction_commit,
1403 .region_add = whpx_region_add,
1404 .region_del = whpx_region_del,
1405 .log_sync = whpx_log_sync,
1409 static void whpx_memory_init(void)
1411 memory_listener_register(&whpx_memory_listener, &address_space_memory);
1414 static void whpx_handle_interrupt(CPUState *cpu, int mask)
1416 cpu->interrupt_request |= mask;
1418 if (!qemu_cpu_is_self(cpu)) {
1424 * Load the functions from the given library. If a handle is provided, it
1425 * is used; otherwise the library is opened and the handle is updated on
1426 * return to refer to the newly opened library.
1428 static bool load_whp_dispatch_fns(HMODULE *handle,
1429 WHPFunctionList function_list)
1431 HMODULE hLib = *handle;
1433 #define WINHV_PLATFORM_DLL "WinHvPlatform.dll"
1434 #define WINHV_EMULATION_DLL "WinHvEmulation.dll"
1435 #define WHP_LOAD_FIELD_OPTIONAL(return_type, function_name, signature) \
1436 whp_dispatch.function_name = \
1437 (function_name ## _t)GetProcAddress(hLib, #function_name); \
1439 #define WHP_LOAD_FIELD(return_type, function_name, signature) \
1440 whp_dispatch.function_name = \
1441 (function_name ## _t)GetProcAddress(hLib, #function_name); \
1442 if (!whp_dispatch.function_name) { \
1443 error_report("Could not load function %s", #function_name); \
1447 #define WHP_LOAD_LIB(lib_name, handle_lib) \
1448 if (!handle_lib) { \
1449 handle_lib = LoadLibrary(lib_name); \
1450 if (!handle_lib) { \
1451 error_report("Could not load library %s.", lib_name); \
1456 switch (function_list) {
1457 case WINHV_PLATFORM_FNS_DEFAULT:
1458 WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
1459 LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)
1462 case WINHV_EMULATION_FNS_DEFAULT:
1463 WHP_LOAD_LIB(WINHV_EMULATION_DLL, hLib)
1464 LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)
1467 case WINHV_PLATFORM_FNS_SUPPLEMENTAL:
1468 WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
1469 LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_LOAD_FIELD_OPTIONAL)
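/*
 * Accelerator initialization: check that the Windows Hypervisor Platform
 * is present, create the partition and configure it (processor count,
 * extended MSR/CPUID exits) before registering the memory listener.
 */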
1488 static int whpx_accel_init(MachineState *ms)
1490 struct whpx_state *whpx;
1493 WHV_CAPABILITY whpx_cap;
1494 UINT32 whpx_cap_size;
1495 WHV_PARTITION_PROPERTY prop;
1497 whpx = &whpx_global;
1499 if (!init_whp_dispatch()) {
1504 memset(whpx, 0, sizeof(struct whpx_state));
1505 whpx->mem_quota = ms->ram_size;
1507 hr = whp_dispatch.WHvGetCapability(
1508 WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1509 sizeof(whpx_cap), &whpx_cap_size);
1510 if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1511 error_report("WHPX: No accelerator found, hr=%08lx", hr);
1516 hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
1518 error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1523 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1524 prop.ProcessorCount = ms->smp.cpus;
1525 hr = whp_dispatch.WHvSetPartitionProperty(
1527 WHvPartitionPropertyCodeProcessorCount,
1529 sizeof(WHV_PARTITION_PROPERTY));
1532 error_report("WHPX: Failed to set partition processor count to %u,"
1533 " hr=%08lx", prop.ProcessorCount, hr);
1538 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1539 prop.ExtendedVmExits.X64MsrExit = 1;
1540 prop.ExtendedVmExits.X64CpuidExit = 1;
1541 hr = whp_dispatch.WHvSetPartitionProperty(
1543 WHvPartitionPropertyCodeExtendedVmExits,
1545 sizeof(WHV_PARTITION_PROPERTY));
1548 error_report("WHPX: Failed to enable partition extended X64MsrExit and"
1549 " X64CpuidExit hr=%08lx", hr);
1554 UINT32 cpuidExitList[] = {1, 0x80000001};
1555 hr = whp_dispatch.WHvSetPartitionProperty(
1557 WHvPartitionPropertyCodeCpuidExitList,
1559 RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
1562 error_report("WHPX: Failed to set partition CpuidExitList, hr=%08lx",
1568 hr = whp_dispatch.WHvSetupPartition(whpx->partition);
1570 error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1577 cpu_interrupt_handler = whpx_handle_interrupt;
1579 printf("Windows Hypervisor Platform accelerator is operational\n");
1584 if (NULL != whpx->partition) {
1585 whp_dispatch.WHvDeletePartition(whpx->partition);
1586 whpx->partition = NULL;
1593 int whpx_enabled(void)
1595 return whpx_allowed;
1598 static void whpx_accel_class_init(ObjectClass *oc, void *data)
1600 AccelClass *ac = ACCEL_CLASS(oc);
1602 ac->init_machine = whpx_accel_init;
1603 ac->allowed = &whpx_allowed;
1606 static const TypeInfo whpx_accel_type = {
1607 .name = ACCEL_CLASS_NAME("whpx"),
1608 .parent = TYPE_ACCEL,
1609 .class_init = whpx_accel_class_init,
1612 static void whpx_type_init(void)
1614 type_register_static(&whpx_accel_type);
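/*
 * Resolve the WinHvPlatform.dll and WinHvEmulation.dll entry points at run
 * time; failure here leaves WHPX unavailable instead of preventing QEMU
 * from starting.
 */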
1617 bool init_whp_dispatch(void)
1619 if (whp_dispatch_initialized) {
1623 if (!load_whp_dispatch_fns(&hWinHvPlatform, WINHV_PLATFORM_FNS_DEFAULT)) {
1627 if (!load_whp_dispatch_fns(&hWinHvEmulation, WINHV_EMULATION_FNS_DEFAULT)) {
1631 assert(load_whp_dispatch_fns(&hWinHvPlatform,
1632 WINHV_PLATFORM_FNS_SUPPLEMENTAL));
1633 whp_dispatch_initialized = true;
1637 if (hWinHvPlatform) {
1638 FreeLibrary(hWinHvPlatform);
1641 if (hWinHvEmulation) {
1642 FreeLibrary(hWinHvEmulation);
1648 type_init(whpx_type_init);