1 /*
2  * QEMU Windows Hypervisor Platform accelerator (WHPX)
3  *
4  * Copyright Microsoft Corp. 2017
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10
11 #include "qemu/osdep.h"
12 #include "cpu.h"
13 #include "exec/address-spaces.h"
14 #include "exec/exec-all.h"
15 #include "exec/ioport.h"
16 #include "qemu-common.h"
17 #include "strings.h"
18 #include "sysemu/accel.h"
19 #include "sysemu/whpx.h"
20 #include "sysemu/sysemu.h"
21 #include "sysemu/cpus.h"
22 #include "qemu/main-loop.h"
23 #include "hw/boards.h"
24 #include "qemu/error-report.h"
25 #include "qemu/queue.h"
26 #include "qapi/error.h"
27 #include "migration/blocker.h"
28
29 #include <WinHvPlatform.h>
30 #include <WinHvEmulation.h>
31
32 struct whpx_state {
33     uint64_t mem_quota;
34     WHV_PARTITION_HANDLE partition;
35     uint32_t exit_ctx_size;
36 };
37
38 static const WHV_REGISTER_NAME whpx_register_names[] = {
39
40     /* X64 General purpose registers */
41     WHvX64RegisterRax,
42     WHvX64RegisterRcx,
43     WHvX64RegisterRdx,
44     WHvX64RegisterRbx,
45     WHvX64RegisterRsp,
46     WHvX64RegisterRbp,
47     WHvX64RegisterRsi,
48     WHvX64RegisterRdi,
49     WHvX64RegisterR8,
50     WHvX64RegisterR9,
51     WHvX64RegisterR10,
52     WHvX64RegisterR11,
53     WHvX64RegisterR12,
54     WHvX64RegisterR13,
55     WHvX64RegisterR14,
56     WHvX64RegisterR15,
57     WHvX64RegisterRip,
58     WHvX64RegisterRflags,
59
60     /* X64 Segment registers */
61     WHvX64RegisterEs,
62     WHvX64RegisterCs,
63     WHvX64RegisterSs,
64     WHvX64RegisterDs,
65     WHvX64RegisterFs,
66     WHvX64RegisterGs,
67     WHvX64RegisterLdtr,
68     WHvX64RegisterTr,
69
70     /* X64 Table registers */
71     WHvX64RegisterIdtr,
72     WHvX64RegisterGdtr,
73
74     /* X64 Control Registers */
75     WHvX64RegisterCr0,
76     WHvX64RegisterCr2,
77     WHvX64RegisterCr3,
78     WHvX64RegisterCr4,
79     WHvX64RegisterCr8,
80
81     /* X64 Debug Registers */
82     /*
83      * WHvX64RegisterDr0,
84      * WHvX64RegisterDr1,
85      * WHvX64RegisterDr2,
86      * WHvX64RegisterDr3,
87      * WHvX64RegisterDr6,
88      * WHvX64RegisterDr7,
89      */
90
91     /* X64 Floating Point and Vector Registers */
92     WHvX64RegisterXmm0,
93     WHvX64RegisterXmm1,
94     WHvX64RegisterXmm2,
95     WHvX64RegisterXmm3,
96     WHvX64RegisterXmm4,
97     WHvX64RegisterXmm5,
98     WHvX64RegisterXmm6,
99     WHvX64RegisterXmm7,
100     WHvX64RegisterXmm8,
101     WHvX64RegisterXmm9,
102     WHvX64RegisterXmm10,
103     WHvX64RegisterXmm11,
104     WHvX64RegisterXmm12,
105     WHvX64RegisterXmm13,
106     WHvX64RegisterXmm14,
107     WHvX64RegisterXmm15,
108     WHvX64RegisterFpMmx0,
109     WHvX64RegisterFpMmx1,
110     WHvX64RegisterFpMmx2,
111     WHvX64RegisterFpMmx3,
112     WHvX64RegisterFpMmx4,
113     WHvX64RegisterFpMmx5,
114     WHvX64RegisterFpMmx6,
115     WHvX64RegisterFpMmx7,
116     WHvX64RegisterFpControlStatus,
117     WHvX64RegisterXmmControlStatus,
118
119     /* X64 MSRs */
120     WHvX64RegisterTsc,
121     WHvX64RegisterEfer,
122 #ifdef TARGET_X86_64
123     WHvX64RegisterKernelGsBase,
124 #endif
125     WHvX64RegisterApicBase,
126     /* WHvX64RegisterPat, */
127     WHvX64RegisterSysenterCs,
128     WHvX64RegisterSysenterEip,
129     WHvX64RegisterSysenterEsp,
130     WHvX64RegisterStar,
131 #ifdef TARGET_X86_64
132     WHvX64RegisterLstar,
133     WHvX64RegisterCstar,
134     WHvX64RegisterSfmask,
135 #endif
136
137     /* Interrupt / Event Registers */
138     /*
139      * WHvRegisterPendingInterruption,
140      * WHvRegisterInterruptState,
141      * WHvRegisterPendingEvent0,
142      * WHvRegisterPendingEvent1
143      * WHvX64RegisterDeliverabilityNotifications,
144      */
145 };
146
147 struct whpx_register_set {
148     WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
149 };
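/*
 * The order of whpx_register_names[] defines the layout of
 * whpx_register_set: whpx_set_registers() and whpx_get_registers() walk both
 * arrays in lockstep with a single index and assert the expected name at
 * each step, e.g.
 *
 *     assert(whpx_register_names[idx] == WHvX64RegisterCr0);
 *     vcxt.values[idx++].Reg64 = env->cr[0];
 *
 * so any entry added to or removed from the table above must be mirrored in
 * both marshalling routines.
 */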
150
151 struct whpx_vcpu {
152     WHV_EMULATOR_HANDLE emulator;
153     bool window_registered;
154     bool interruptable;
155     uint64_t tpr;
156     uint64_t apic_base;
157     WHV_X64_PENDING_INTERRUPTION_REGISTER interrupt_in_flight;
158
159     /* Must be the last field as it may have a tail */
160     WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
161 };
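/*
 * exit_ctx must remain the last member: whpx_init_vcpu() allocates the vcpu
 * as FIELD_OFFSET(struct whpx_vcpu, exit_ctx) + whpx->exit_ctx_size bytes,
 * where the size is queried at run time with WHvGetRunExitContextSize(), so
 * the exit context may carry a tail beyond sizeof(WHV_RUN_VP_EXIT_CONTEXT).
 */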
162
163 static bool whpx_allowed;
164
165 struct whpx_state whpx_global;
166
167
168 /*
169  * VP support
170  */
171
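/*
 * The accelerator-private vCPU state is hung off CPUState::hax_vcpu; the
 * field is simply reused for WHPX (with a cast in whpx_init_vcpu()) since
 * only one hardware accelerator is ever active in a given QEMU process.
 */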
172 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
173 {
174     return (struct whpx_vcpu *)cpu->hax_vcpu;
175 }
176
177 static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
178                                              int r86)
179 {
180     WHV_X64_SEGMENT_REGISTER hs;
181     unsigned flags = qs->flags;
182
183     hs.Base = qs->base;
184     hs.Limit = qs->limit;
185     hs.Selector = qs->selector;
186
187     if (v86) {
188         hs.Attributes = 0;
189         hs.SegmentType = 3;
190         hs.Present = 1;
191         hs.DescriptorPrivilegeLevel = 3;
192         hs.NonSystemSegment = 1;
193
194     } else {
195         hs.Attributes = (flags >> DESC_TYPE_SHIFT);
196
197         if (r86) {
198             /* hs.Base &= 0xfffff; */
199         }
200     }
201
202     return hs;
203 }
204
205 static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
206 {
207     SegmentCache qs;
208
209     qs.base = hs->Base;
210     qs.limit = hs->Limit;
211     qs.selector = hs->Selector;
212
213     qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
214
215     return qs;
216 }
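/*
 * whpx_seg_q2h()/whpx_seg_h2q() translate between QEMU's SegmentCache and
 * WHV_X64_SEGMENT_REGISTER. QEMU keeps the descriptor attribute bits shifted
 * up by DESC_TYPE_SHIFT inside 'flags', whereas WHv packs them into the
 * 16-bit Attributes field, hence the shifts in both directions. In
 * virtual-8086 mode the attributes are synthesized (present, DPL 3,
 * non-system data segment) rather than taken from the cached flags.
 */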
217
218 static void whpx_set_registers(CPUState *cpu)
219 {
220     struct whpx_state *whpx = &whpx_global;
221     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
222     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
223     X86CPU *x86_cpu = X86_CPU(cpu);
224     struct whpx_register_set vcxt = {0};
225     HRESULT hr;
226     int idx = 0;
227     int i;
228     int v86, r86;
229
230     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
231
232     v86 = (env->eflags & VM_MASK);
233     r86 = !(env->cr[0] & CR0_PE_MASK);
234
235     vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
236     vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
237
238     /* Indexes for first 16 registers match between HV and QEMU definitions */
239     for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
240         vcxt.values[idx].Reg64 = env->regs[idx];
241     }
242
243     /* Same goes for RIP and RFLAGS */
244     assert(whpx_register_names[idx] == WHvX64RegisterRip);
245     vcxt.values[idx++].Reg64 = env->eip;
246
247     assert(whpx_register_names[idx] == WHvX64RegisterRflags);
248     vcxt.values[idx++].Reg64 = env->eflags;
249
250     /* Translate 6+4 segment registers. HV and QEMU order matches  */
251     assert(idx == WHvX64RegisterEs);
252     for (i = 0; i < 6; i += 1, idx += 1) {
253         vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
254     }
255
256     assert(idx == WHvX64RegisterLdtr);
257     vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
258
259     assert(idx == WHvX64RegisterTr);
260     vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
261
262     assert(idx == WHvX64RegisterIdtr);
263     vcxt.values[idx].Table.Base = env->idt.base;
264     vcxt.values[idx].Table.Limit = env->idt.limit;
265     idx += 1;
266
267     assert(idx == WHvX64RegisterGdtr);
268     vcxt.values[idx].Table.Base = env->gdt.base;
269     vcxt.values[idx].Table.Limit = env->gdt.limit;
270     idx += 1;
271
272     /* CR0, 2, 3, 4, 8 */
273     assert(whpx_register_names[idx] == WHvX64RegisterCr0);
274     vcxt.values[idx++].Reg64 = env->cr[0];
275     assert(whpx_register_names[idx] == WHvX64RegisterCr2);
276     vcxt.values[idx++].Reg64 = env->cr[2];
277     assert(whpx_register_names[idx] == WHvX64RegisterCr3);
278     vcxt.values[idx++].Reg64 = env->cr[3];
279     assert(whpx_register_names[idx] == WHvX64RegisterCr4);
280     vcxt.values[idx++].Reg64 = env->cr[4];
281     assert(whpx_register_names[idx] == WHvX64RegisterCr8);
282     vcxt.values[idx++].Reg64 = vcpu->tpr;
283
284     /* 8 Debug Registers - Skipped */
285
286     /* 16 XMM registers */
287     assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
288     for (i = 0; i < 16; i += 1, idx += 1) {
289         vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
290         vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
291     }
292
293     /* 8 FP registers */
294     assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
295     for (i = 0; i < 8; i += 1, idx += 1) {
296         vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
297         /* vcxt.values[idx].Fp.AsUINT128.High64 =
298                env->fpregs[i].mmx.MMX_Q(1);
299         */
300     }
301
302     /* FP control status register */
303     assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
304     vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
305     vcxt.values[idx].FpControlStatus.FpStatus =
306         (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
307     vcxt.values[idx].FpControlStatus.FpTag = 0;
308     for (i = 0; i < 8; ++i) {
309         vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
310     }
311     vcxt.values[idx].FpControlStatus.Reserved = 0;
312     vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
313     vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
314     idx += 1;
315
316     /* XMM control status register */
317     assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
318     vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
319     vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
320     vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
321     idx += 1;
322
323     /* MSRs */
324     assert(whpx_register_names[idx] == WHvX64RegisterTsc);
325     vcxt.values[idx++].Reg64 = env->tsc;
326     assert(whpx_register_names[idx] == WHvX64RegisterEfer);
327     vcxt.values[idx++].Reg64 = env->efer;
328 #ifdef TARGET_X86_64
329     assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
330     vcxt.values[idx++].Reg64 = env->kernelgsbase;
331 #endif
332
333     assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
334     vcxt.values[idx++].Reg64 = vcpu->apic_base;
335
336     /* WHvX64RegisterPat - Skipped */
337
338     assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
339     vcxt.values[idx++].Reg64 = env->sysenter_cs;
340     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
341     vcxt.values[idx++].Reg64 = env->sysenter_eip;
342     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
343     vcxt.values[idx++].Reg64 = env->sysenter_esp;
344     assert(whpx_register_names[idx] == WHvX64RegisterStar);
345     vcxt.values[idx++].Reg64 = env->star;
346 #ifdef TARGET_X86_64
347     assert(whpx_register_names[idx] == WHvX64RegisterLstar);
348     vcxt.values[idx++].Reg64 = env->lstar;
349     assert(whpx_register_names[idx] == WHvX64RegisterCstar);
350     vcxt.values[idx++].Reg64 = env->cstar;
351     assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
352     vcxt.values[idx++].Reg64 = env->fmask;
353 #endif
354
355     /* Interrupt / Event Registers - Skipped */
356
357     assert(idx == RTL_NUMBER_OF(whpx_register_names));
358
359     hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
360                                          whpx_register_names,
361                                          RTL_NUMBER_OF(whpx_register_names),
362                                          &vcxt.values[0]);
363
364     if (FAILED(hr)) {
365         error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
366                      hr);
367     }
368
369     return;
370 }
371
372 static void whpx_get_registers(CPUState *cpu)
373 {
374     struct whpx_state *whpx = &whpx_global;
375     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
376     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
377     X86CPU *x86_cpu = X86_CPU(cpu);
378     struct whpx_register_set vcxt;
379     uint64_t tpr, apic_base;
380     HRESULT hr;
381     int idx = 0;
382     int i;
383
384     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
385
386     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
387                                          whpx_register_names,
388                                          RTL_NUMBER_OF(whpx_register_names),
389                                          &vcxt.values[0]);
390     if (FAILED(hr)) {
391         error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
392                      hr);
393     }
394
395     /* Indexes for first 16 registers match between HV and QEMU definitions */
396     for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
397         env->regs[idx] = vcxt.values[idx].Reg64;
398     }
399
400     /* Same goes for RIP and RFLAGS */
401     assert(whpx_register_names[idx] == WHvX64RegisterRip);
402     env->eip = vcxt.values[idx++].Reg64;
403     assert(whpx_register_names[idx] == WHvX64RegisterRflags);
404     env->eflags = vcxt.values[idx++].Reg64;
405
406     /* Translate 6+4 segment registers. HV and QEMU order matches  */
407     assert(idx == WHvX64RegisterEs);
408     for (i = 0; i < 6; i += 1, idx += 1) {
409         env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
410     }
411
412     assert(idx == WHvX64RegisterLdtr);
413     env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
414     assert(idx == WHvX64RegisterTr);
415     env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
416     assert(idx == WHvX64RegisterIdtr);
417     env->idt.base = vcxt.values[idx].Table.Base;
418     env->idt.limit = vcxt.values[idx].Table.Limit;
419     idx += 1;
420     assert(idx == WHvX64RegisterGdtr);
421     env->gdt.base = vcxt.values[idx].Table.Base;
422     env->gdt.limit = vcxt.values[idx].Table.Limit;
423     idx += 1;
424
425     /* CR0, 2, 3, 4, 8 */
426     assert(whpx_register_names[idx] == WHvX64RegisterCr0);
427     env->cr[0] = vcxt.values[idx++].Reg64;
428     assert(whpx_register_names[idx] == WHvX64RegisterCr2);
429     env->cr[2] = vcxt.values[idx++].Reg64;
430     assert(whpx_register_names[idx] == WHvX64RegisterCr3);
431     env->cr[3] = vcxt.values[idx++].Reg64;
432     assert(whpx_register_names[idx] == WHvX64RegisterCr4);
433     env->cr[4] = vcxt.values[idx++].Reg64;
434     assert(whpx_register_names[idx] == WHvX64RegisterCr8);
435     tpr = vcxt.values[idx++].Reg64;
436     if (tpr != vcpu->tpr) {
437         vcpu->tpr = tpr;
438         cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
439     }
440
441     /* 8 Debug Registers - Skipped */
442
443     /* 16 XMM registers */
444     assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
445     for (i = 0; i < 16; i += 1, idx += 1) {
446         env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
447         env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
448     }
449
450     /* 8 FP registers */
451     assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
452     for (i = 0; i < 8; i += 1, idx += 1) {
453         env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
454         /* env->fpregs[i].mmx.MMX_Q(1) =
455                vcxt.values[idx].Fp.AsUINT128.High64;
456         */
457     }
458
459     /* FP control status register */
460     assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
461     env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
462     env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
463     env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
464     for (i = 0; i < 8; ++i) {
465         env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
466     }
467     env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
468     env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
469     idx += 1;
470
471     /* XMM control status register */
472     assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
473     env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
474     idx += 1;
475
476     /* MSRs */
477     assert(whpx_register_names[idx] == WHvX64RegisterTsc);
478     env->tsc = vcxt.values[idx++].Reg64;
479     assert(whpx_register_names[idx] == WHvX64RegisterEfer);
480     env->efer = vcxt.values[idx++].Reg64;
481 #ifdef TARGET_X86_64
482     assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
483     env->kernelgsbase = vcxt.values[idx++].Reg64;
484 #endif
485
486     assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
487     apic_base = vcxt.values[idx++].Reg64;
488     if (apic_base != vcpu->apic_base) {
489         vcpu->apic_base = apic_base;
490         cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
491     }
492
493     /* WHvX64RegisterPat - Skipped */
494
495     assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
496     env->sysenter_cs = vcxt.values[idx++].Reg64;
497     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
498     env->sysenter_eip = vcxt.values[idx++].Reg64;
499     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
500     env->sysenter_esp = vcxt.values[idx++].Reg64;
501     assert(whpx_register_names[idx] == WHvX64RegisterStar);
502     env->star = vcxt.values[idx++].Reg64;
503 #ifdef TARGET_X86_64
504     assert(whpx_register_names[idx] == WHvX64RegisterLstar);
505     env->lstar = vcxt.values[idx++].Reg64;
506     assert(whpx_register_names[idx] == WHvX64RegisterCstar);
507     env->cstar = vcxt.values[idx++].Reg64;
508     assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
509     env->fmask = vcxt.values[idx++].Reg64;
510 #endif
511
512     /* Interrupt / Event Registers - Skipped */
513
514     assert(idx == RTL_NUMBER_OF(whpx_register_names));
515
516     return;
517 }
518
519 static HRESULT CALLBACK whpx_emu_ioport_callback(
520     void *ctx,
521     WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
522 {
523     MemTxAttrs attrs = { 0 };
524     address_space_rw(&address_space_io, IoAccess->Port, attrs,
525                      (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
526                      IoAccess->Direction);
527     return S_OK;
528 }
529
530 static HRESULT CALLBACK whpx_emu_memio_callback(
531     void *ctx,
532     WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
533 {
534     cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
535                            ma->Direction);
536     return S_OK;
537 }
538
539 static HRESULT CALLBACK whpx_emu_getreg_callback(
540     void *ctx,
541     const WHV_REGISTER_NAME *RegisterNames,
542     UINT32 RegisterCount,
543     WHV_REGISTER_VALUE *RegisterValues)
544 {
545     HRESULT hr;
546     struct whpx_state *whpx = &whpx_global;
547     CPUState *cpu = (CPUState *)ctx;
548
549     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
550                                          RegisterNames, RegisterCount,
551                                          RegisterValues);
552     if (FAILED(hr)) {
553         error_report("WHPX: Failed to get virtual processor registers,"
554                      " hr=%08lx", hr);
555     }
556
557     return hr;
558 }
559
560 static HRESULT CALLBACK whpx_emu_setreg_callback(
561     void *ctx,
562     const WHV_REGISTER_NAME *RegisterNames,
563     UINT32 RegisterCount,
564     const WHV_REGISTER_VALUE *RegisterValues)
565 {
566     HRESULT hr;
567     struct whpx_state *whpx = &whpx_global;
568     CPUState *cpu = (CPUState *)ctx;
569
570     hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
571                                          RegisterNames, RegisterCount,
572                                          RegisterValues);
573     if (FAILED(hr)) {
574         error_report("WHPX: Failed to set virtual processor registers,"
575                      " hr=%08lx", hr);
576     }
577
578     /*
579      * The emulator just successfully wrote the register state. We clear the
580      * dirty state so we avoid the double write on resume of the VP.
581      */
582     cpu->vcpu_dirty = false;
583
584     return hr;
585 }
586
587 static HRESULT CALLBACK whpx_emu_translate_callback(
588     void *ctx,
589     WHV_GUEST_VIRTUAL_ADDRESS Gva,
590     WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
591     WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
592     WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
593 {
594     HRESULT hr;
595     struct whpx_state *whpx = &whpx_global;
596     CPUState *cpu = (CPUState *)ctx;
597     WHV_TRANSLATE_GVA_RESULT res;
598
599     hr = WHvTranslateGva(whpx->partition, cpu->cpu_index,
600                          Gva, TranslateFlags, &res, Gpa);
601     if (FAILED(hr)) {
602         error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
603     } else {
604         *TranslationResult = res.ResultCode;
605     }
606
607     return hr;
608 }
609
610 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
611     .Size = sizeof(WHV_EMULATOR_CALLBACKS),
612     .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
613     .WHvEmulatorMemoryCallback = whpx_emu_memio_callback,
614     .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
615     .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
616     .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
617 };
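/*
 * These callbacks are registered with WHvEmulatorCreateEmulator() in
 * whpx_init_vcpu(). When an MMIO or port I/O exit needs software emulation,
 * WHvEmulatorTryMmioEmulation()/WHvEmulatorTryIoEmulation() decode the
 * faulting instruction and call back into QEMU through this table to touch
 * guest memory, I/O ports and vCPU registers.
 */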
618
619 static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
620 {
621     HRESULT hr;
622     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
623     WHV_EMULATOR_STATUS emu_status;
624
625     hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu,
626                                      &vcpu->exit_ctx.VpContext, ctx,
627                                      &emu_status);
628     if (FAILED(hr)) {
629         error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
630         return -1;
631     }
632
633     if (!emu_status.EmulationSuccessful) {
634         error_report("WHPX: Failed to emulate MMIO access");
635         return -1;
636     }
637
638     return 0;
639 }
640
641 static int whpx_handle_portio(CPUState *cpu,
642                               WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
643 {
644     HRESULT hr;
645     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
646     WHV_EMULATOR_STATUS emu_status;
647
648     hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu,
649                                    &vcpu->exit_ctx.VpContext, ctx,
650                                    &emu_status);
651     if (FAILED(hr)) {
652         error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
653         return -1;
654     }
655
656     if (!emu_status.EmulationSuccessful) {
657         error_report("WHPX: Failed to emulate PortMMIO access");
658         return -1;
659     }
660
661     return 0;
662 }
663
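/*
 * whpx_handle_halt() returns 1 when the vCPU really goes to sleep (no
 * maskable interrupt with IF set and no pending NMI), making whpx_vcpu_run()
 * leave its loop with EXCP_HLT; a return of 0 means something is already
 * pending and execution simply continues.
 */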
664 static int whpx_handle_halt(CPUState *cpu)
665 {
666     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
667     int ret = 0;
668
669     qemu_mutex_lock_iothread();
670     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
671           (env->eflags & IF_MASK)) &&
672         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
673         cpu->exception_index = EXCP_HLT;
674         cpu->halted = true;
675         ret = 1;
676     }
677     qemu_mutex_unlock_iothread();
678
679     return ret;
680 }
681
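/*
 * whpx_vcpu_pre_run() takes the iothread lock and converts QEMU's pending
 * interrupt_request bits into register writes for the hypervisor: an NMI or
 * external interrupt becomes a WHvRegisterPendingInterruption value, a TPR
 * change is propagated to CR8, and while a hard interrupt cannot be injected
 * a deliverability notification is requested so that the partition exits
 * with WHvRunVpExitReasonX64InterruptWindow once injection becomes possible.
 */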
682 static void whpx_vcpu_pre_run(CPUState *cpu)
683 {
684     HRESULT hr;
685     struct whpx_state *whpx = &whpx_global;
686     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
687     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
688     X86CPU *x86_cpu = X86_CPU(cpu);
689     int irq;
690     WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
691     UINT32 reg_count = 0;
692     WHV_REGISTER_VALUE reg_values[3] = {0};
693     WHV_REGISTER_NAME reg_names[3];
694
695     qemu_mutex_lock_iothread();
696
697     /* Inject NMI */
698     if (!vcpu->interrupt_in_flight.InterruptionPending &&
699         cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
700         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
701             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
702             vcpu->interruptable = false;
703             new_int.InterruptionType = WHvX64PendingNmi;
704             new_int.InterruptionPending = 1;
705             new_int.InterruptionVector = 2;
706         }
707         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
708             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
709         }
710     }
711
712     /*
713      * Force the VCPU out of its inner loop to process any INIT requests or
714      * commit pending TPR access.
715      */
716     if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
717         if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
718             !(env->hflags & HF_SMM_MASK)) {
719             cpu->exit_request = 1;
720         }
721         if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
722             cpu->exit_request = 1;
723         }
724     }
725
726     /* Get pending hard interruption or replay one that was overwritten */
727     if (!vcpu->interrupt_in_flight.InterruptionPending &&
728         vcpu->interruptable && (env->eflags & IF_MASK)) {
729         assert(!new_int.InterruptionPending);
730         if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
731             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
732             irq = cpu_get_pic_interrupt(env);
733             if (irq >= 0) {
734                 new_int.InterruptionType = WHvX64PendingInterrupt;
735                 new_int.InterruptionPending = 1;
736                 new_int.InterruptionVector = irq;
737             }
738         }
739     }
740
741     /* Setup interrupt state if new one was prepared */
742     if (new_int.InterruptionPending) {
743         reg_values[reg_count].PendingInterruption = new_int;
744         reg_names[reg_count] = WHvRegisterPendingInterruption;
745         reg_count += 1;
746     }
747
748     /* Sync the TPR to CR8 if it was modified during the intercept */
749     reg_values[reg_count].Reg64 = cpu_get_apic_tpr(x86_cpu->apic_state);
750     if (reg_values[reg_count].Reg64 != vcpu->tpr) {
751         vcpu->tpr = reg_values[reg_count].Reg64;
752         cpu->exit_request = 1;
753         reg_names[reg_count] = WHvX64RegisterCr8;
754         reg_count += 1;
755     }
756
757     /* Update the state of the interrupt delivery notification */
758     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
759         reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
760             = 1;
761         if (vcpu->window_registered != 1) {
762             vcpu->window_registered = 1;
763         }
764         reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
765         reg_count += 1;
766     }
767
768     qemu_mutex_unlock_iothread();
769
770     if (reg_count) {
771         hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
772                                              reg_names, reg_count, reg_values);
773         if (FAILED(hr)) {
774             error_report("WHPX: Failed to set interrupt state registers,"
775                          " hr=%08lx", hr);
776         }
777     }
778
779     return;
780 }
781
782 static void whpx_vcpu_post_run(CPUState *cpu)
783 {
784     HRESULT hr;
785     struct whpx_state *whpx = &whpx_global;
786     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
787     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
788     X86CPU *x86_cpu = X86_CPU(cpu);
789     WHV_REGISTER_VALUE reg_values[4];
790     const WHV_REGISTER_NAME reg_names[4] = {
791         WHvX64RegisterRflags,
792         WHvX64RegisterCr8,
793         WHvRegisterPendingInterruption,
794         WHvRegisterInterruptState,
795     };
796
797     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
798                                          reg_names, 4, reg_values);
799     if (FAILED(hr)) {
800         error_report("WHPX: Failed to get interrupt state regusters,"
801                      " hr=%08lx", hr);
802         vcpu->interruptable = false;
803         return;
804     }
805
806     assert(reg_names[0] == WHvX64RegisterRflags);
807     env->eflags = reg_values[0].Reg64;
808
809     assert(reg_names[1] == WHvX64RegisterCr8);
810     if (vcpu->tpr != reg_values[1].Reg64) {
811         vcpu->tpr = reg_values[1].Reg64;
812         qemu_mutex_lock_iothread();
813         cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
814         qemu_mutex_unlock_iothread();
815     }
816
817     assert(reg_names[2] == WHvRegisterPendingInterruption);
818     vcpu->interrupt_in_flight = reg_values[2].PendingInterruption;
819
820     assert(reg_names[3] == WHvRegisterInterruptState);
821     vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow;
822
823     return;
824 }
825
826 static void whpx_vcpu_process_async_events(CPUState *cpu)
827 {
828     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
829     X86CPU *x86_cpu = X86_CPU(cpu);
830     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
831
832     if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
833         !(env->hflags & HF_SMM_MASK)) {
834
835         do_cpu_init(x86_cpu);
836         cpu->vcpu_dirty = true;
837         vcpu->interruptable = true;
838     }
839
840     if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
841         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
842         apic_poll_irq(x86_cpu->apic_state);
843     }
844
845     if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
846          (env->eflags & IF_MASK)) ||
847         (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
848         cpu->halted = false;
849     }
850
851     if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
852         if (!cpu->vcpu_dirty) {
853             whpx_get_registers(cpu);
854         }
855         do_cpu_sipi(x86_cpu);
856     }
857
858     if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
859         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
860         if (!cpu->vcpu_dirty) {
861             whpx_get_registers(cpu);
862         }
863         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
864                                       env->tpr_access_type);
865     }
866
867     return;
868 }
869
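/*
 * Per-vCPU execution loop: push any dirty register state, let
 * whpx_vcpu_pre_run() queue an interrupt for injection, enter the partition
 * with WHvRunVirtualProcessor() and then dispatch on the exit reason. The
 * loop keeps iterating while the handlers return 0; MMIO and port I/O exits
 * are resolved through the instruction emulator, HLT may suspend the vCPU,
 * and a cancelled run (see whpx_vcpu_kick()) drops back to the caller with
 * EXCP_INTERRUPT. The iothread lock is released around the whole loop.
 */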
870 static int whpx_vcpu_run(CPUState *cpu)
871 {
872     HRESULT hr;
873     struct whpx_state *whpx = &whpx_global;
874     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
875     int ret;
876
877     whpx_vcpu_process_async_events(cpu);
878     if (cpu->halted) {
879         cpu->exception_index = EXCP_HLT;
880         atomic_set(&cpu->exit_request, false);
881         return 0;
882     }
883
884     qemu_mutex_unlock_iothread();
885     cpu_exec_start(cpu);
886
887     do {
888         if (cpu->vcpu_dirty) {
889             whpx_set_registers(cpu);
890             cpu->vcpu_dirty = false;
891         }
892
893         whpx_vcpu_pre_run(cpu);
894
895         if (atomic_read(&cpu->exit_request)) {
896             whpx_vcpu_kick(cpu);
897         }
898
899         hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index,
900                                     &vcpu->exit_ctx, whpx->exit_ctx_size);
901
902         if (FAILED(hr)) {
903             error_report("WHPX: Failed to exec a virtual processor,"
904                          " hr=%08lx", hr);
905             ret = -1;
906             break;
907         }
908
909         whpx_vcpu_post_run(cpu);
910
911         switch (vcpu->exit_ctx.ExitReason) {
912         case WHvRunVpExitReasonMemoryAccess:
913             ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
914             break;
915
916         case WHvRunVpExitReasonX64IoPortAccess:
917             ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
918             break;
919
920         case WHvRunVpExitReasonX64InterruptWindow:
921             vcpu->window_registered = 0;
922             break;
923
924         case WHvRunVpExitReasonX64Halt:
925             ret = whpx_handle_halt(cpu);
926             break;
927
928         case WHvRunVpExitReasonCanceled:
929             cpu->exception_index = EXCP_INTERRUPT;
930             ret = 1;
931             break;
932
933         case WHvRunVpExitReasonNone:
934         case WHvRunVpExitReasonUnrecoverableException:
935         case WHvRunVpExitReasonInvalidVpRegisterValue:
936         case WHvRunVpExitReasonUnsupportedFeature:
937         case WHvRunVpExitReasonX64MsrAccess:
938         case WHvRunVpExitReasonX64Cpuid:
939         case WHvRunVpExitReasonException:
940         default:
941             error_report("WHPX: Unexpected VP exit code %d",
942                          vcpu->exit_ctx.ExitReason);
943             whpx_get_registers(cpu);
944             qemu_mutex_lock_iothread();
945             qemu_system_guest_panicked(cpu_get_crash_info(cpu));
946             qemu_mutex_unlock_iothread();
947             break;
948         }
949
950     } while (!ret);
951
952     cpu_exec_end(cpu);
953     qemu_mutex_lock_iothread();
954     current_cpu = cpu;
955
956     atomic_set(&cpu->exit_request, false);
957
958     return ret < 0;
959 }
960
961 static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
962 {
963     whpx_get_registers(cpu);
964     cpu->vcpu_dirty = true;
965 }
966
967 static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
968                                                run_on_cpu_data arg)
969 {
970     whpx_set_registers(cpu);
971     cpu->vcpu_dirty = false;
972 }
973
974 static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
975                                               run_on_cpu_data arg)
976 {
977     whpx_set_registers(cpu);
978     cpu->vcpu_dirty = false;
979 }
980
981 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
982                                                run_on_cpu_data arg)
983 {
984     cpu->vcpu_dirty = true;
985 }
986
987 /*
988  * CPU support.
989  */
990
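/*
 * cpu->vcpu_dirty tracks which side owns the register state: when true,
 * QEMU's CPUX86State is authoritative and whpx_vcpu_run() pushes it with
 * whpx_set_registers() before the next run; when false, the hypervisor copy
 * is current and must first be fetched with whpx_get_registers(). The
 * synchronize helpers below fetch or push the state and update the flag on
 * the vCPU's own thread via run_on_cpu().
 */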
991 void whpx_cpu_synchronize_state(CPUState *cpu)
992 {
993     if (!cpu->vcpu_dirty) {
994         run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
995     }
996 }
997
998 void whpx_cpu_synchronize_post_reset(CPUState *cpu)
999 {
1000     run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1001 }
1002
1003 void whpx_cpu_synchronize_post_init(CPUState *cpu)
1004 {
1005     run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1006 }
1007
1008 void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
1009 {
1010     run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1011 }
1012
1013 /*
1014  * Vcpu support.
1015  */
1016
1017 static Error *whpx_migration_blocker;
1018
1019 int whpx_init_vcpu(CPUState *cpu)
1020 {
1021     HRESULT hr;
1022     struct whpx_state *whpx = &whpx_global;
1023     struct whpx_vcpu *vcpu;
1024     Error *local_error = NULL;
1025
1026     /* Add migration blockers for all unsupported features of the
1027      * Windows Hypervisor Platform
1028      */
1029     if (whpx_migration_blocker == NULL) {
1030         error_setg(&whpx_migration_blocker,
1031                "State blocked due to non-migratable CPUID feature support,"
1032                "dirty memory tracking support, and XSAVE/XRSTOR support");
1033
1034         (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1035         if (local_error) {
1036             error_report_err(local_error);
1037             error_free(whpx_migration_blocker);
1038             migrate_del_blocker(whpx_migration_blocker);
1039             return -EINVAL;
1040         }
1041     }
1042
1043     vcpu = g_malloc0(FIELD_OFFSET(struct whpx_vcpu, exit_ctx) +
1044                      whpx->exit_ctx_size);
1045
1046     if (!vcpu) {
1047         error_report("WHPX: Failed to allocte VCPU context.");
1048         return -ENOMEM;
1049     }
1050
1051     hr = WHvEmulatorCreateEmulator(&whpx_emu_callbacks, &vcpu->emulator);
1052     if (FAILED(hr)) {
1053         error_report("WHPX: Failed to setup instruction completion support,"
1054                      " hr=%08lx", hr);
1055         g_free(vcpu);
1056         return -EINVAL;
1057     }
1058
1059     hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1060     if (FAILED(hr)) {
1061         error_report("WHPX: Failed to create a virtual processor,"
1062                      " hr=%08lx", hr);
1063         WHvEmulatorDestroyEmulator(vcpu->emulator);
1064         g_free(vcpu);
1065         return -EINVAL;
1066     }
1067
1068     vcpu->interruptable = true;
1069
1070     cpu->vcpu_dirty = true;
1071     cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1072
1073     return 0;
1074 }
1075
1076 int whpx_vcpu_exec(CPUState *cpu)
1077 {
1078     int ret;
1079     int fatal;
1080
1081     for (;;) {
1082         if (cpu->exception_index >= EXCP_INTERRUPT) {
1083             ret = cpu->exception_index;
1084             cpu->exception_index = -1;
1085             break;
1086         }
1087
1088         fatal = whpx_vcpu_run(cpu);
1089
1090         if (fatal) {
1091             error_report("WHPX: Failed to exec a virtual processor");
1092             abort();
1093         }
1094     }
1095
1096     return ret;
1097 }
1098
1099 void whpx_destroy_vcpu(CPUState *cpu)
1100 {
1101     struct whpx_state *whpx = &whpx_global;
1102     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
1103
1104     WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
1105     WHvEmulatorDestroyEmulator(vcpu->emulator);
1106     g_free(cpu->hax_vcpu);
1107     return;
1108 }
1109
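/*
 * Kicking a vCPU cancels any WHvRunVirtualProcessor() call in flight; the
 * run loop then sees WHvRunVpExitReasonCanceled and returns to the main loop
 * with EXCP_INTERRUPT so pending work for this vCPU can be processed.
 */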
1110 void whpx_vcpu_kick(CPUState *cpu)
1111 {
1112     struct whpx_state *whpx = &whpx_global;
1113     WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1114 }
1115
1116 /*
1117  * Memory support.
1118  */
1119
1120 static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
1121                                 void *host_va, int add, int rom,
1122                                 const char *name)
1123 {
1124     struct whpx_state *whpx = &whpx_global;
1125     HRESULT hr;
1126
1127     /*
1128     if (add) {
1129         printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1130                (void*)start_pa, (void*)size, host_va,
1131                (rom ? "ROM" : "RAM"), name);
1132     } else {
1133         printf("WHPX: DEL PA:%p Size:%p, Host:%p,      '%s'\n",
1134                (void*)start_pa, (void*)size, host_va, name);
1135     }
1136     */
1137
1138     if (add) {
1139         hr = WHvMapGpaRange(whpx->partition,
1140                             host_va,
1141                             start_pa,
1142                             size,
1143                             (WHvMapGpaRangeFlagRead |
1144                              WHvMapGpaRangeFlagExecute |
1145                              (rom ? 0 : WHvMapGpaRangeFlagWrite)));
1146     } else {
1147         hr = WHvUnmapGpaRange(whpx->partition,
1148                               start_pa,
1149                               size);
1150     }
1151
1152     if (FAILED(hr)) {
1153         error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1154                      " Host:%p, hr=%08lx",
1155                      (add ? "MAP" : "UNMAP"), name,
1156                      (void *)start_pa, (void *)size, host_va, hr);
1157     }
1158 }
1159
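/*
 * Only host-page-aligned RAM is mapped into the partition: the section is
 * trimmed so that both the start address and the size are multiples of the
 * host page size, and anything smaller than a page is skipped. Guest
 * accesses to unmapped ranges are expected to surface as
 * WHvRunVpExitReasonMemoryAccess exits and go through the emulator instead.
 */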
1160 static void whpx_process_section(MemoryRegionSection *section, int add)
1161 {
1162     MemoryRegion *mr = section->mr;
1163     hwaddr start_pa = section->offset_within_address_space;
1164     ram_addr_t size = int128_get64(section->size);
1165     unsigned int delta;
1166     uint64_t host_va;
1167
1168     if (!memory_region_is_ram(mr)) {
1169         return;
1170     }
1171
1172     delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
1173     delta &= ~qemu_real_host_page_mask;
1174     if (delta > size) {
1175         return;
1176     }
1177     start_pa += delta;
1178     size -= delta;
1179     size &= qemu_real_host_page_mask;
1180     if (!size || (start_pa & ~qemu_real_host_page_mask)) {
1181         return;
1182     }
1183
1184     host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
1185             + section->offset_within_region + delta;
1186
1187     whpx_update_mapping(start_pa, size, (void *)host_va, add,
1188                        memory_region_is_rom(mr), mr->name);
1189 }
1190
1191 static void whpx_region_add(MemoryListener *listener,
1192                            MemoryRegionSection *section)
1193 {
1194     memory_region_ref(section->mr);
1195     whpx_process_section(section, 1);
1196 }
1197
1198 static void whpx_region_del(MemoryListener *listener,
1199                            MemoryRegionSection *section)
1200 {
1201     whpx_process_section(section, 0);
1202     memory_region_unref(section->mr);
1203 }
1204
1205 static void whpx_transaction_begin(MemoryListener *listener)
1206 {
1207 }
1208
1209 static void whpx_transaction_commit(MemoryListener *listener)
1210 {
1211 }
1212
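/*
 * WHPX does not give us dirty-page tracking (one of the gaps named in the
 * migration blocker registered by whpx_init_vcpu()), so log_sync
 * conservatively marks the whole RAM section dirty on every sync.
 */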
1213 static void whpx_log_sync(MemoryListener *listener,
1214                          MemoryRegionSection *section)
1215 {
1216     MemoryRegion *mr = section->mr;
1217
1218     if (!memory_region_is_ram(mr)) {
1219         return;
1220     }
1221
1222     memory_region_set_dirty(mr, 0, int128_get64(section->size));
1223 }
1224
1225 static MemoryListener whpx_memory_listener = {
1226     .begin = whpx_transaction_begin,
1227     .commit = whpx_transaction_commit,
1228     .region_add = whpx_region_add,
1229     .region_del = whpx_region_del,
1230     .log_sync = whpx_log_sync,
1231     .priority = 10,
1232 };
1233
1234 static void whpx_memory_init(void)
1235 {
1236     memory_listener_register(&whpx_memory_listener, &address_space_memory);
1237 }
1238
1239 static void whpx_handle_interrupt(CPUState *cpu, int mask)
1240 {
1241     cpu->interrupt_request |= mask;
1242
1243     if (!qemu_cpu_is_self(cpu)) {
1244         qemu_cpu_kick(cpu);
1245     }
1246 }
1247
1248 /*
1249  * Partition support
1250  */
1251
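/*
 * Partition bring-up: probe for the hypervisor with
 * WHvGetCapability(WHvCapabilityCodeHypervisorPresent), create a partition,
 * set its processor count, finalize it with WHvSetupPartition(), then query
 * the run-exit context size, register the memory listener and install
 * whpx_handle_interrupt(). vCPUs themselves are created later, one per CPU,
 * from whpx_init_vcpu().
 */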
1252 static int whpx_accel_init(MachineState *ms)
1253 {
1254     struct whpx_state *whpx;
1255     int ret;
1256     HRESULT hr;
1257     WHV_CAPABILITY whpx_cap;
1258     WHV_PARTITION_PROPERTY prop;
1259
1260     whpx = &whpx_global;
1261
1262     memset(whpx, 0, sizeof(struct whpx_state));
1263     whpx->mem_quota = ms->ram_size;
1264
1265     hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1266                           sizeof(whpx_cap));
1267     if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1268         error_report("WHPX: No accelerator found, hr=%08lx", hr);
1269         ret = -ENOSPC;
1270         goto error;
1271     }
1272
1273     hr = WHvCreatePartition(&whpx->partition);
1274     if (FAILED(hr)) {
1275         error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1276         ret = -EINVAL;
1277         goto error;
1278     }
1279
1280     memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1281     prop.PropertyCode = WHvPartitionPropertyCodeProcessorCount;
1282     prop.ProcessorCount = smp_cpus;
1283     hr = WHvSetPartitionProperty(whpx->partition,
1284                                  &prop,
1285                                  sizeof(WHV_PARTITION_PROPERTY));
1286
1287     if (FAILED(hr)) {
1288         error_report("WHPX: Failed to set partition core count to %d,"
1289                      " hr=%08lx", smp_cores, hr);
1290         ret = -EINVAL;
1291         goto error;
1292     }
1293
1294     hr = WHvSetupPartition(whpx->partition);
1295     if (FAILED(hr)) {
1296         error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1297         ret = -EINVAL;
1298         goto error;
1299     }
1300
1301     whpx->exit_ctx_size = WHvGetRunExitContextSize();
1302     assert(whpx->exit_ctx_size);
1303
1304     whpx_memory_init();
1305
1306     cpu_interrupt_handler = whpx_handle_interrupt;
1307
1308     printf("Windows Hypervisor Platform accelerator is operational\n");
1309     return 0;
1310
1311   error:
1312
1313     if (NULL != whpx->partition) {
1314         WHvDeletePartition(whpx->partition);
1315         whpx->partition = NULL;
1316     }
1317
1318
1319     return ret;
1320 }
1321
1322 int whpx_enabled(void)
1323 {
1324     return whpx_allowed;
1325 }
1326
1327 static void whpx_accel_class_init(ObjectClass *oc, void *data)
1328 {
1329     AccelClass *ac = ACCEL_CLASS(oc);
1330     ac->name = "WHPX";
1331     ac->init_machine = whpx_accel_init;
1332     ac->allowed = &whpx_allowed;
1333 }
1334
1335 static const TypeInfo whpx_accel_type = {
1336     .name = ACCEL_CLASS_NAME("whpx"),
1337     .parent = TYPE_ACCEL,
1338     .class_init = whpx_accel_class_init,
1339 };
1340
1341 static void whpx_type_init(void)
1342 {
1343     type_register_static(&whpx_accel_type);
1344 }
1345
1346 type_init(whpx_type_init);