 * QEMU Windows Hypervisor Platform accelerator (WHPX)
 * Copyright Microsoft Corp. 2017
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.

#include "qemu/osdep.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu-common.h"
#include "sysemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/cpus.h"
#include "sysemu/runstate.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "whp-dispatch.h"

#include <WinHvPlatform.h>
#include <WinHvEmulation.h>

    WHV_PARTITION_HANDLE partition;

static const WHV_REGISTER_NAME whpx_register_names[] = {

    /* X64 General purpose registers */
    /* X64 Segment registers */
    /* X64 Table registers */
    /* X64 Control Registers */
    /* X64 Debug Registers */
    /* X64 Floating Point and Vector Registers */
    WHvX64RegisterFpMmx0,
    WHvX64RegisterFpMmx1,
    WHvX64RegisterFpMmx2,
    WHvX64RegisterFpMmx3,
    WHvX64RegisterFpMmx4,
    WHvX64RegisterFpMmx5,
    WHvX64RegisterFpMmx6,
    WHvX64RegisterFpMmx7,
    WHvX64RegisterFpControlStatus,
    WHvX64RegisterXmmControlStatus,

    WHvX64RegisterKernelGsBase,
    WHvX64RegisterApicBase,
    /* WHvX64RegisterPat, */
    WHvX64RegisterSysenterCs,
    WHvX64RegisterSysenterEip,
    WHvX64RegisterSysenterEsp,
    WHvX64RegisterSfmask,

    /* Interrupt / Event Registers */
     * WHvRegisterPendingInterruption,
     * WHvRegisterInterruptState,
     * WHvRegisterPendingEvent0,
     * WHvRegisterPendingEvent1
     * WHvX64RegisterDeliverabilityNotifications,

struct whpx_register_set {
    WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];

    WHV_EMULATOR_HANDLE emulator;
    bool window_registered;
    bool interruption_pending;

    /* Must be the last field as it may have a tail */
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;

static bool whpx_allowed;
static bool whp_dispatch_initialized;
static HMODULE hWinHvPlatform, hWinHvEmulation;

struct whpx_state whpx_global;
struct WHPDispatch whp_dispatch;
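
/*
 * Per-vCPU WHPX state lives in a struct whpx_vcpu that is stashed in
 * CPUState::hax_vcpu (the pointer field is shared with the HAX accelerator),
 * so this helper simply casts it back.
 */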
static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
    return (struct whpx_vcpu *)cpu->hax_vcpu;
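
/*
 * Translate a QEMU SegmentCache into the WHV_X64_SEGMENT_REGISTER layout.
 * In virtual-8086 mode the descriptor attributes are synthesised (ring-3,
 * non-system segment); otherwise the cached QEMU flags are repacked into
 * the Attributes field.
 */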
static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
    WHV_X64_SEGMENT_REGISTER hs;
    unsigned flags = qs->flags;

    hs.Limit = qs->limit;
    hs.Selector = qs->selector;
        hs.DescriptorPrivilegeLevel = 3;
        hs.NonSystemSegment = 1;
        hs.Attributes = (flags >> DESC_TYPE_SHIFT);
            /* hs.Base &= 0xfffff; */

static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
    qs.limit = hs->Limit;
    qs.selector = hs->Selector;
    qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;

static int whpx_set_tsc(CPUState *cpu)
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE tsc_val;
    struct whpx_state *whpx = &whpx_global;

    /*
     * Suspend the partition prior to setting the TSC to reduce the variance
     * in TSC across vCPUs. When the first vCPU runs post suspend, the
     * partition is automatically resumed.
     */
    if (whp_dispatch.WHvSuspendPartitionTime) {
        /*
         * Failing to suspend the partition before setting the TSC is not a
         * fatal error. It just increases the likelihood of TSC variance
         * between vCPUs, and some guest OSes can handle that just fine.
         */
        hr = whp_dispatch.WHvSuspendPartitionTime(whpx->partition);
            warn_report("WHPX: Failed to suspend partition, hr=%08lx", hr);

    tsc_val.Reg64 = env->tsc;
    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
        error_report("WHPX: Failed to set TSC, hr=%08lx", hr);
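
/*
 * Marshal the QEMU CPU state into the flat whpx_register_names layout and
 * push it to the hypervisor with a single WHvSetVirtualProcessorRegisters
 * call.  The asserts keep the register-name table and this marshalling code
 * in lock step.
 */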
static void whpx_set_registers(CPUState *cpu, int level)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /*
     * The following MSRs have side effects on the guest or are too heavy to
     * set at runtime. Limit them to the full state update.
     */
    if (level >= WHPX_SET_RESET_STATE) {

    memset(&vcxt, 0, sizeof(struct whpx_register_set));

    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
        vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);

    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        /* vcxt.values[idx].Fp.AsUINT128.High64 =
               env->fpregs[i].mmx.MMX_Q(1);
        */

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
    for (i = 0; i < 8; ++i) {
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;

    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;

    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;

    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RTL_NUMBER_OF(whpx_register_names),
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",

static int whpx_get_tsc(CPUState *cpu)
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    WHV_REGISTER_NAME tsc_reg = WHvX64RegisterTsc;
    WHV_REGISTER_VALUE tsc_val;
    struct whpx_state *whpx = &whpx_global;

    hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index, &tsc_reg, 1, &tsc_val);
        error_report("WHPX: Failed to get TSC, hr=%08lx", hr);

    env->tsc = tsc_val.Reg64;
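
/*
 * Mirror of whpx_set_registers(): pull the whole register set out of the
 * hypervisor with one WHvGetVirtualProcessorRegisters call and unpack it
 * back into the QEMU CPUX86State, propagating TPR and APIC base changes to
 * the APIC model.
 */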
static void whpx_get_registers(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;
    uint64_t tpr, apic_base;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (!env->tsc_valid) {
        env->tsc_valid = !runstate_is_running();

    hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RTL_NUMBER_OF(whpx_register_names),
        error_report("WHPX: Failed to get virtual processor context, hr=%08lx",

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
        env->regs[idx] = vcxt.values[idx].Reg64;

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    env->eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    env->eflags = vcxt.values[idx++].Reg64;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);

    assert(idx == WHvX64RegisterLdtr);
    env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterTr);
    env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterIdtr);
    env->idt.base = vcxt.values[idx].Table.Base;
    env->idt.limit = vcxt.values[idx].Table.Limit;

    assert(idx == WHvX64RegisterGdtr);
    env->gdt.base = vcxt.values[idx].Table.Base;
    env->gdt.limit = vcxt.values[idx].Table.Limit;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    env->cr[0] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    env->cr[2] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    env->cr[3] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    env->cr[4] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    tpr = vcxt.values[idx++].Reg64;
    if (tpr != vcpu->tpr) {
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
        env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;

    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
        /* env->fpregs[i].mmx.MMX_Q(1) =
               vcxt.values[idx].Fp.AsUINT128.High64;
        */

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
    env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
    env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
    env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
    env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;

    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    env->efer = vcxt.values[idx++].Reg64;

    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    env->kernelgsbase = vcxt.values[idx++].Reg64;

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    apic_base = vcxt.values[idx++].Reg64;
    if (apic_base != vcpu->apic_base) {
        vcpu->apic_base = apic_base;
        cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    env->sysenter_cs = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    env->sysenter_eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    env->sysenter_esp = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    env->star = vcxt.values[idx++].Reg64;

    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    env->lstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    env->cstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    env->fmask = vcxt.values[idx++].Reg64;

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));
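
/*
 * Callbacks handed to the WinHvEmulation instruction emulator.  When the
 * hypervisor exits for MMIO or port I/O, the emulator calls back into QEMU
 * to perform the actual I/O, read/write vCPU registers and translate guest
 * virtual addresses.
 */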
static HRESULT CALLBACK whpx_emu_ioport_callback(
    WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
    MemTxAttrs attrs = { 0 };
    address_space_rw(&address_space_io, IoAccess->Port, attrs,
                     &IoAccess->Data, IoAccess->AccessSize,
                     IoAccess->Direction);

static HRESULT CALLBACK whpx_emu_mmio_callback(
    WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,

static HRESULT CALLBACK whpx_emu_getreg_callback(
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    WHV_REGISTER_VALUE *RegisterValues)
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RegisterNames, RegisterCount,
        error_report("WHPX: Failed to get virtual processor registers,"

static HRESULT CALLBACK whpx_emu_setreg_callback(
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    const WHV_REGISTER_VALUE *RegisterValues)
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RegisterNames, RegisterCount,
        error_report("WHPX: Failed to set virtual processor registers,"

    /*
     * The emulator just successfully wrote the register state. We clear the
     * dirty state so we avoid the double write on resume of the VP.
     */
    cpu->vcpu_dirty = false;

static HRESULT CALLBACK whpx_emu_translate_callback(
    WHV_GUEST_VIRTUAL_ADDRESS Gva,
    WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
    WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
    WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;
    WHV_TRANSLATE_GVA_RESULT res;

    hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
                                      Gva, TranslateFlags, &res, Gpa);
        error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
    *TranslationResult = res.ResultCode;

static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
    .Size = sizeof(WHV_EMULATOR_CALLBACKS),
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
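
/*
 * MMIO and port-I/O exits are forwarded to WHvEmulatorTryMmioEmulation /
 * WHvEmulatorTryIoEmulation, which decode the faulting instruction and
 * complete the access through the callbacks above.
 */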
static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = whp_dispatch.WHvEmulatorTryMmioEmulation(
        &vcpu->exit_ctx.VpContext, ctx,
        error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate MMIO access with"
                     " EmulatorReturnStatus: %u", emu_status.AsUINT32);

static int whpx_handle_portio(CPUState *cpu,
                              WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = whp_dispatch.WHvEmulatorTryIoEmulation(
        &vcpu->exit_ctx.VpContext, ctx,
        error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate PortIO access with"
                     " EmulatorReturnStatus: %u", emu_status.AsUINT32);

static int whpx_handle_halt(CPUState *cpu)
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);

    qemu_mutex_lock_iothread();
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
    qemu_mutex_unlock_iothread();
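
/*
 * Runs before every WHvRunVirtualProcessor call.  Under the iothread lock it
 * injects a pending NMI or external interrupt through
 * WHvRegisterPendingInterruption, mirrors the APIC TPR into CR8, and requests
 * an interrupt-window (deliverability) notification when an interrupt cannot
 * be injected yet.
 */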
static void whpx_vcpu_pre_run(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
    UINT32 reg_count = 0;
    WHV_REGISTER_VALUE reg_values[3];
    WHV_REGISTER_NAME reg_names[3];

    memset(&new_int, 0, sizeof(new_int));
    memset(reg_values, 0, sizeof(reg_values));

    qemu_mutex_lock_iothread();

    if (!vcpu->interruption_pending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
            new_int.InterruptionType = WHvX64PendingNmi;
            new_int.InterruptionPending = 1;
            new_int.InterruptionVector = 2;
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;

    /*
     * Force the VCPU out of its inner loop to process any INIT requests or
     * commit pending TPR access.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;

    /* Get pending hard interruption or replay one that was overwritten */
    if (!vcpu->interruption_pending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        assert(!new_int.InterruptionPending);
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
                new_int.InterruptionType = WHvX64PendingInterrupt;
                new_int.InterruptionPending = 1;
                new_int.InterruptionVector = irq;

    /* Setup interrupt state if a new one was prepared */
    if (new_int.InterruptionPending) {
        reg_values[reg_count].PendingInterruption = new_int;
        reg_names[reg_count] = WHvRegisterPendingInterruption;

    /* Sync the TPR to the CR8 if it was modified during the intercept */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != vcpu->tpr) {
        reg_values[reg_count].Reg64 = tpr;
        cpu->exit_request = 1;
        reg_names[reg_count] = WHvX64RegisterCr8;

    /* Update the state of the interrupt delivery notification */
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
        vcpu->window_registered = 1;
        reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;

    qemu_mutex_unlock_iothread();

    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        reg_names, reg_count, reg_values);
        error_report("WHPX: Failed to set interrupt state registers,"

static void whpx_vcpu_post_run(CPUState *cpu)
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);

    env->eflags = vcpu->exit_ctx.VpContext.Rflags;

    uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
    if (vcpu->tpr != tpr) {
        qemu_mutex_lock_iothread();
        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
        qemu_mutex_unlock_iothread();

    vcpu->interruption_pending =
        vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;

    vcpu->interruptable =
        !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
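
/*
 * Handle events that were queued while the vCPU was not running: INIT and
 * SIPI resets, APIC polling and TPR access reports.  Register state is
 * synchronized from the hypervisor first where the handler needs it.
 */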
static void whpx_vcpu_process_async_events(CPUState *cpu)
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);

    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        do_cpu_sipi(x86_cpu);

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
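
/*
 * Main execution loop for a WHPX vCPU: flush dirty QEMU register state to
 * the hypervisor, inject pending events, run the virtual processor with
 * WHvRunVirtualProcessor and dispatch on the returned ExitReason until an
 * exit back to QEMU is requested.
 */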
static int whpx_vcpu_run(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    whpx_vcpu_process_async_events(cpu);
        cpu->exception_index = EXCP_HLT;
        atomic_set(&cpu->exit_request, false);

    qemu_mutex_unlock_iothread();

        if (cpu->vcpu_dirty) {
            whpx_set_registers(cpu, WHPX_SET_RUNTIME_STATE);
            cpu->vcpu_dirty = false;

        whpx_vcpu_pre_run(cpu);

        if (atomic_read(&cpu->exit_request)) {

        hr = whp_dispatch.WHvRunVirtualProcessor(
            whpx->partition, cpu->cpu_index,
            &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));

            error_report("WHPX: Failed to exec a virtual processor,"

        whpx_vcpu_post_run(cpu);

        switch (vcpu->exit_ctx.ExitReason) {
        case WHvRunVpExitReasonMemoryAccess:
            ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);

        case WHvRunVpExitReasonX64IoPortAccess:
            ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);

        case WHvRunVpExitReasonX64InterruptWindow:
            vcpu->window_registered = 0;

        case WHvRunVpExitReasonX64Halt:
            ret = whpx_handle_halt(cpu);

        case WHvRunVpExitReasonCanceled:
            cpu->exception_index = EXCP_INTERRUPT;

        case WHvRunVpExitReasonX64MsrAccess: {
            WHV_REGISTER_VALUE reg_values[3] = {0};
            WHV_REGISTER_NAME reg_names[3];

            reg_names[0] = WHvX64RegisterRip;
            reg_names[1] = WHvX64RegisterRax;
            reg_names[2] = WHvX64RegisterRdx;

            reg_values[0].Reg64 =
                vcpu->exit_ctx.VpContext.Rip +
                vcpu->exit_ctx.VpContext.InstructionLength;

            /*
             * For all unsupported MSR access we ignore writes and return 0
             * on reads.
             */
            reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?

            hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
                reg_names, reg_count,
                error_report("WHPX: Failed to set MsrAccess state"
                             " registers, hr=%08lx", hr);

        case WHvRunVpExitReasonX64Cpuid: {
            WHV_REGISTER_VALUE reg_values[5];
            WHV_REGISTER_NAME reg_names[5];
            UINT32 reg_count = 5;
            UINT64 cpuid_fn, rip = 0, rax = 0, rcx = 0, rdx = 0, rbx = 0;
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;

            memset(reg_values, 0, sizeof(reg_values));

            rip = vcpu->exit_ctx.VpContext.Rip +
                  vcpu->exit_ctx.VpContext.InstructionLength;
            cpuid_fn = vcpu->exit_ctx.CpuidAccess.Rax;

            /*
             * Ideally, these should be supplied to the hypervisor during VCPU
             * initialization and it should be able to satisfy this request.
             * But, currently, WHPX doesn't support setting CPUID values in the
             * hypervisor once the partition has been set up, which is too late
             * since VCPUs are realized later. For now, use the values from
             * QEMU to satisfy these requests, until WHPX adds support for
             * being able to set these values in the hypervisor at runtime.
             */
            cpu_x86_cpuid(env, cpuid_fn, 0, (UINT32 *)&rax, (UINT32 *)&rbx,
                          (UINT32 *)&rcx, (UINT32 *)&rdx);

            /* Remove any support for OSVW */
            rcx &= ~CPUID_EXT3_OSVW;

            reg_names[0] = WHvX64RegisterRip;
            reg_names[1] = WHvX64RegisterRax;
            reg_names[2] = WHvX64RegisterRcx;
            reg_names[3] = WHvX64RegisterRdx;
            reg_names[4] = WHvX64RegisterRbx;

            reg_values[0].Reg64 = rip;
            reg_values[1].Reg64 = rax;
            reg_values[2].Reg64 = rcx;
            reg_values[3].Reg64 = rdx;
            reg_values[4].Reg64 = rbx;

            hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
                whpx->partition, cpu->cpu_index,
                error_report("WHPX: Failed to set CpuidAccess state registers,"

        case WHvRunVpExitReasonNone:
        case WHvRunVpExitReasonUnrecoverableException:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonException:
            error_report("WHPX: Unexpected VP exit code %d",
                         vcpu->exit_ctx.ExitReason);
            whpx_get_registers(cpu);
            qemu_mutex_lock_iothread();
            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
            qemu_mutex_unlock_iothread();

    qemu_mutex_lock_iothread();

    atomic_set(&cpu->exit_request, false);
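
/*
 * cpu_synchronize helpers: these run on the vCPU thread via run_on_cpu()
 * and either pull the register state out of the hypervisor or mark it dirty
 * so it is written back before the next run.
 */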
static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
    whpx_get_registers(cpu);
    cpu->vcpu_dirty = true;

static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
                                               run_on_cpu_data arg)
    whpx_set_registers(cpu, WHPX_SET_RESET_STATE);
    cpu->vcpu_dirty = false;

static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
                                              run_on_cpu_data arg)
    whpx_set_registers(cpu, WHPX_SET_FULL_STATE);
    cpu->vcpu_dirty = false;

static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                               run_on_cpu_data arg)
    cpu->vcpu_dirty = true;

void whpx_cpu_synchronize_state(CPUState *cpu)
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);

void whpx_cpu_synchronize_post_reset(CPUState *cpu)
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);

void whpx_cpu_synchronize_post_init(CPUState *cpu)
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);

void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
    run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);

static Error *whpx_migration_blocker;

static void whpx_cpu_update_state(void *opaque, int running, RunState state)
    CPUX86State *env = opaque;

        env->tsc_valid = false;
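
/*
 * Create the WHPX vCPU: register a migration blocker for the features the
 * accelerator cannot migrate yet, create the instruction emulator, then
 * create the Hyper-V virtual processor itself with WHvCreateVirtualProcessor.
 */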
int whpx_init_vcpu(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu;
    Error *local_error = NULL;

    /* Add migration blockers for all unsupported features of the
     * Windows Hypervisor Platform
     */
    if (whpx_migration_blocker == NULL) {
        error_setg(&whpx_migration_blocker,
                   "State blocked due to non-migratable CPUID feature support, "
                   "dirty memory tracking support, and XSAVE/XRSTOR support");

        (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
            error_report_err(local_error);
            migrate_del_blocker(whpx_migration_blocker);
            error_free(whpx_migration_blocker);

    vcpu = g_malloc0(sizeof(struct whpx_vcpu));

        error_report("WHPX: Failed to allocate VCPU context.");

    hr = whp_dispatch.WHvEmulatorCreateEmulator(
        &whpx_emu_callbacks,
        error_report("WHPX: Failed to setup instruction completion support,"

    hr = whp_dispatch.WHvCreateVirtualProcessor(
        whpx->partition, cpu->cpu_index, 0);
        error_report("WHPX: Failed to create a virtual processor,"
        whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);

    vcpu->interruptable = true;

    cpu->vcpu_dirty = true;
    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
    qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);

int whpx_vcpu_exec(CPUState *cpu)
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        ret = cpu->exception_index;
        cpu->exception_index = -1;

    fatal = whpx_vcpu_run(cpu);

        error_report("WHPX: Failed to exec a virtual processor");

void whpx_destroy_vcpu(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
    whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
    g_free(cpu->hax_vcpu);

void whpx_vcpu_kick(CPUState *cpu)
    struct whpx_state *whpx = &whpx_global;
    whp_dispatch.WHvCancelRunVirtualProcessor(
        whpx->partition, cpu->cpu_index, 0);
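
/*
 * Guest memory handling: RAM regions registered through the memory listener
 * below are mapped into the partition's guest physical address space with
 * WHvMapGpaRange (read/execute, plus write unless the region is a ROM) and
 * removed again with WHvUnmapGpaRange.
 */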
static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
                                void *host_va, int add, int rom,
    struct whpx_state *whpx = &whpx_global;

        printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
               (void*)start_pa, (void*)size, host_va,
               (rom ? "ROM" : "RAM"), name);
        printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
               (void*)start_pa, (void*)size, host_va, name);

        hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
                                         (WHvMapGpaRangeFlagRead |
                                          WHvMapGpaRangeFlagExecute |
                                          (rom ? 0 : WHvMapGpaRangeFlagWrite)));
        hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,

        error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
                     " Host:%p, hr=%08lx",
                     (add ? "MAP" : "UNMAP"), name,
                     (void *)(uintptr_t)start_pa, (void *)size, host_va, hr);

static void whpx_process_section(MemoryRegionSection *section, int add)
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);

    if (!memory_region_is_ram(mr)) {

    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;

    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
              + section->offset_within_region + delta;

    whpx_update_mapping(start_pa, size, (void *)(uintptr_t)host_va, add,
                        memory_region_is_rom(mr), mr->name);
static void whpx_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
    memory_region_ref(section->mr);
    whpx_process_section(section, 1);

static void whpx_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
    whpx_process_section(section, 0);
    memory_region_unref(section->mr);

static void whpx_transaction_begin(MemoryListener *listener)

static void whpx_transaction_commit(MemoryListener *listener)

static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {

    memory_region_set_dirty(mr, 0, int128_get64(section->size));

static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,

static void whpx_memory_init(void)
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
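
/*
 * cpu_interrupt_handler hook: record the interrupt request and, when raised
 * from a different thread, kick the vCPU so it drops out of
 * WHvRunVirtualProcessor and notices the request.
 */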
static void whpx_handle_interrupt(CPUState *cpu, int mask)
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
/*
 * Load the functions from the given library, using the given handle. If a
 * handle is provided, it is used, otherwise the library is opened. The
 * handle will be updated on return with the opened one.
 */
static bool load_whp_dispatch_fns(HMODULE *handle,
                                  WHPFunctionList function_list)
    HMODULE hLib = *handle;

    #define WINHV_PLATFORM_DLL "WinHvPlatform.dll"
    #define WINHV_EMULATION_DLL "WinHvEmulation.dll"
    #define WHP_LOAD_FIELD_OPTIONAL(return_type, function_name, signature) \
        whp_dispatch.function_name = \
            (function_name ## _t)GetProcAddress(hLib, #function_name); \

    #define WHP_LOAD_FIELD(return_type, function_name, signature) \
        whp_dispatch.function_name = \
            (function_name ## _t)GetProcAddress(hLib, #function_name); \
        if (!whp_dispatch.function_name) { \
            error_report("Could not load function %s", #function_name); \

    #define WHP_LOAD_LIB(lib_name, handle_lib) \
        if (!handle_lib) { \
            handle_lib = LoadLibrary(lib_name); \
            if (!handle_lib) { \
                error_report("Could not load library %s.", lib_name); \

    switch (function_list) {
    case WINHV_PLATFORM_FNS_DEFAULT:
        WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
        LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)

    case WINHV_EMULATION_FNS_DEFAULT:
        WHP_LOAD_LIB(WINHV_EMULATION_DLL, hLib)
        LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)

    case WINHV_PLATFORM_FNS_SUPPLEMENTAL:
        WHP_LOAD_LIB(WINHV_PLATFORM_DLL, hLib)
        LIST_WINHVPLATFORM_FUNCTIONS_SUPPLEMENTAL(WHP_LOAD_FIELD_OPTIONAL)
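
/*
 * Accelerator bring-up: verify that the Windows Hypervisor is present,
 * create a partition, configure its processor count, enable the extended
 * MSR and CPUID exits plus the CPUID exit list, and commit the configuration
 * with WHvSetupPartition.
 */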
static int whpx_accel_init(MachineState *ms)
    struct whpx_state *whpx;
    WHV_CAPABILITY whpx_cap;
    UINT32 whpx_cap_size;
    WHV_PARTITION_PROPERTY prop;

    whpx = &whpx_global;

    if (!init_whp_dispatch()) {

    memset(whpx, 0, sizeof(struct whpx_state));
    whpx->mem_quota = ms->ram_size;

    hr = whp_dispatch.WHvGetCapability(
        WHvCapabilityCodeHypervisorPresent, &whpx_cap,
        sizeof(whpx_cap), &whpx_cap_size);
    if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
        error_report("WHPX: No accelerator found, hr=%08lx", hr);

    hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
        error_report("WHPX: Failed to create partition, hr=%08lx", hr);

    memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
    prop.ProcessorCount = ms->smp.cpus;
    hr = whp_dispatch.WHvSetPartitionProperty(
        WHvPartitionPropertyCodeProcessorCount,
        sizeof(WHV_PARTITION_PROPERTY));

        error_report("WHPX: Failed to set partition processor count to %u,"
                     " hr=%08lx", prop.ProcessorCount, hr);

    memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
    prop.ExtendedVmExits.X64MsrExit = 1;
    prop.ExtendedVmExits.X64CpuidExit = 1;
    hr = whp_dispatch.WHvSetPartitionProperty(
        WHvPartitionPropertyCodeExtendedVmExits,
        sizeof(WHV_PARTITION_PROPERTY));

        error_report("WHPX: Failed to enable partition extended X64MsrExit and"
                     " X64CpuidExit hr=%08lx", hr);

    UINT32 cpuidExitList[] = {1, 0x80000001};
    hr = whp_dispatch.WHvSetPartitionProperty(
        WHvPartitionPropertyCodeCpuidExitList,
        RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));

        error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",

    hr = whp_dispatch.WHvSetupPartition(whpx->partition);
        error_report("WHPX: Failed to setup partition, hr=%08lx", hr);

    cpu_interrupt_handler = whpx_handle_interrupt;

    printf("Windows Hypervisor Platform accelerator is operational\n");

    if (NULL != whpx->partition) {
        whp_dispatch.WHvDeletePartition(whpx->partition);
        whpx->partition = NULL;

int whpx_enabled(void)
    return whpx_allowed;
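
/*
 * Register the "whpx" accelerator class with QEMU's accelerator framework.
 * Once built in, it is selected on the command line with "-accel whpx"
 * (or "-machine accel=whpx").
 */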
static void whpx_accel_class_init(ObjectClass *oc, void *data)
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;

static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,

static void whpx_type_init(void)
    type_register_static(&whpx_accel_type);

bool init_whp_dispatch(void)
    if (whp_dispatch_initialized) {

    if (!load_whp_dispatch_fns(&hWinHvPlatform, WINHV_PLATFORM_FNS_DEFAULT)) {

    if (!load_whp_dispatch_fns(&hWinHvEmulation, WINHV_EMULATION_FNS_DEFAULT)) {

    assert(load_whp_dispatch_fns(&hWinHvPlatform,
                                 WINHV_PLATFORM_FNS_SUPPLEMENTAL));
    whp_dispatch_initialized = true;

    if (hWinHvPlatform) {
        FreeLibrary(hWinHvPlatform);

    if (hWinHvEmulation) {
        FreeLibrary(hWinHvEmulation);

type_init(whpx_type_init);