1 /*
2  * QEMU Windows Hypervisor Platform accelerator (WHPX)
3  *
4  * Copyright Microsoft Corp. 2017
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10
11 #include "qemu/osdep.h"
12 #include "cpu.h"
13 #include "exec/address-spaces.h"
14 #include "exec/exec-all.h"
15 #include "exec/ioport.h"
16 #include "qemu-common.h"
17 #include "strings.h"
18 #include "sysemu/accel.h"
19 #include "sysemu/whpx.h"
20 #include "sysemu/sysemu.h"
21 #include "sysemu/cpus.h"
22 #include "qemu/main-loop.h"
23 #include "hw/boards.h"
24 #include "qemu/error-report.h"
25 #include "qemu/queue.h"
26 #include "qapi/error.h"
27 #include "migration/blocker.h"
28
29 #include <winhvplatform.h>
30 #include <winhvemulation.h>
31
32 struct whpx_state {
33     uint64_t mem_quota;
34     WHV_PARTITION_HANDLE partition;
35     uint32_t exit_ctx_size;
36 };
37
38 static const WHV_REGISTER_NAME whpx_register_names[] = {
39
40     /* X64 General purpose registers */
41     WHvX64RegisterRax,
42     WHvX64RegisterRcx,
43     WHvX64RegisterRdx,
44     WHvX64RegisterRbx,
45     WHvX64RegisterRsp,
46     WHvX64RegisterRbp,
47     WHvX64RegisterRsi,
48     WHvX64RegisterRdi,
49     WHvX64RegisterR8,
50     WHvX64RegisterR9,
51     WHvX64RegisterR10,
52     WHvX64RegisterR11,
53     WHvX64RegisterR12,
54     WHvX64RegisterR13,
55     WHvX64RegisterR14,
56     WHvX64RegisterR15,
57     WHvX64RegisterRip,
58     WHvX64RegisterRflags,
59
60     /* X64 Segment registers */
61     WHvX64RegisterEs,
62     WHvX64RegisterCs,
63     WHvX64RegisterSs,
64     WHvX64RegisterDs,
65     WHvX64RegisterFs,
66     WHvX64RegisterGs,
67     WHvX64RegisterLdtr,
68     WHvX64RegisterTr,
69
70     /* X64 Table registers */
71     WHvX64RegisterIdtr,
72     WHvX64RegisterGdtr,
73
74     /* X64 Control Registers */
75     WHvX64RegisterCr0,
76     WHvX64RegisterCr2,
77     WHvX64RegisterCr3,
78     WHvX64RegisterCr4,
79     WHvX64RegisterCr8,
80
81     /* X64 Debug Registers */
82     /*
83      * WHvX64RegisterDr0,
84      * WHvX64RegisterDr1,
85      * WHvX64RegisterDr2,
86      * WHvX64RegisterDr3,
87      * WHvX64RegisterDr6,
88      * WHvX64RegisterDr7,
89      */
90
91     /* X64 Floating Point and Vector Registers */
92     WHvX64RegisterXmm0,
93     WHvX64RegisterXmm1,
94     WHvX64RegisterXmm2,
95     WHvX64RegisterXmm3,
96     WHvX64RegisterXmm4,
97     WHvX64RegisterXmm5,
98     WHvX64RegisterXmm6,
99     WHvX64RegisterXmm7,
100     WHvX64RegisterXmm8,
101     WHvX64RegisterXmm9,
102     WHvX64RegisterXmm10,
103     WHvX64RegisterXmm11,
104     WHvX64RegisterXmm12,
105     WHvX64RegisterXmm13,
106     WHvX64RegisterXmm14,
107     WHvX64RegisterXmm15,
108     WHvX64RegisterFpMmx0,
109     WHvX64RegisterFpMmx1,
110     WHvX64RegisterFpMmx2,
111     WHvX64RegisterFpMmx3,
112     WHvX64RegisterFpMmx4,
113     WHvX64RegisterFpMmx5,
114     WHvX64RegisterFpMmx6,
115     WHvX64RegisterFpMmx7,
116     WHvX64RegisterFpControlStatus,
117     WHvX64RegisterXmmControlStatus,
118
119     /* X64 MSRs */
120     WHvX64RegisterTsc,
121     WHvX64RegisterEfer,
122 #ifdef TARGET_X86_64
123     WHvX64RegisterKernelGsBase,
124 #endif
125     WHvX64RegisterApicBase,
126     /* WHvX64RegisterPat, */
127     WHvX64RegisterSysenterCs,
128     WHvX64RegisterSysenterEip,
129     WHvX64RegisterSysenterEsp,
130     WHvX64RegisterStar,
131 #ifdef TARGET_X86_64
132     WHvX64RegisterLstar,
133     WHvX64RegisterCstar,
134     WHvX64RegisterSfmask,
135 #endif
136
137     /* Interrupt / Event Registers */
138     /*
139      * WHvRegisterPendingInterruption,
140      * WHvRegisterInterruptState,
141      * WHvRegisterPendingEvent0,
142      * WHvRegisterPendingEvent1
143      * WHvX64RegisterDeliverabilityNotifications,
144      */
145 };
146
147 struct whpx_register_set {
148     WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
149 };
150
151 struct whpx_vcpu {
152     WHV_EMULATOR_HANDLE emulator;
153     bool window_registered;
154     bool interruptable;
155     uint64_t tpr;
156     uint64_t apic_base;
157     WHV_X64_PENDING_INTERRUPTION_REGISTER interrupt_in_flight;
158
159     /* Must be the last field as it may have a tail */
160     WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
161 };
162
163 static bool whpx_allowed;
164
165 struct whpx_state whpx_global;
166
167
168 /*
169  * VP support
170  */
171
172 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
173 {
174     return (struct whpx_vcpu *)cpu->hax_vcpu;
175 }
176
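/*
 * Translate a QEMU SegmentCache into the WHV_X64_SEGMENT_REGISTER layout.
 * In virtual-8086 mode the descriptor attributes are forced to a flat,
 * present, ring-3 data segment; otherwise the QEMU descriptor flags are
 * copied starting at DESC_TYPE_SHIFT, which lines up with the packed
 * attribute bits expected by the hypervisor.
 */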
177 static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
178                                              int r86)
179 {
180     WHV_X64_SEGMENT_REGISTER hs;
181     unsigned flags = qs->flags;
182
183     hs.Base = qs->base;
184     hs.Limit = qs->limit;
185     hs.Selector = qs->selector;
186
187     if (v86) {
188         hs.Attributes = 0;
189         hs.SegmentType = 3;
190         hs.Present = 1;
191         hs.DescriptorPrivilegeLevel = 3;
192         hs.NonSystemSegment = 1;
193
194     } else {
195         hs.Attributes = (flags >> DESC_TYPE_SHIFT);
196
197         if (r86) {
198             /* hs.Base &= 0xfffff; */
199         }
200     }
201
202     return hs;
203 }
204
205 static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
206 {
207     SegmentCache qs;
208
209     qs.base = hs->Base;
210     qs.limit = hs->Limit;
211     qs.selector = hs->Selector;
212
213     qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
214
215     return qs;
216 }
217
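/*
 * Copy the QEMU-side CPU state (general purpose registers, segments,
 * descriptor tables, control registers, FPU/XMM state and MSRs) into the
 * hypervisor partition for this virtual processor. The values are written
 * in exactly the order of whpx_register_names[], which the asserts below
 * double-check as idx advances.
 */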
218 static void whpx_set_registers(CPUState *cpu)
219 {
220     struct whpx_state *whpx = &whpx_global;
221     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
222     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
223     X86CPU *x86_cpu = X86_CPU(cpu);
224     struct whpx_register_set vcxt = {0};
225     HRESULT hr;
226     int idx = 0;
227     int i;
228     int v86, r86;
229
230     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
231
232     v86 = (env->eflags & VM_MASK);
233     r86 = !(env->cr[0] & CR0_PE_MASK);
234
235     vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
236     vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
237
238     /* Indexes for first 16 registers match between HV and QEMU definitions */
239     for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
240         vcxt.values[idx].Reg64 = env->regs[idx];
241     }
242
243     /* Same goes for RIP and RFLAGS */
244     assert(whpx_register_names[idx] == WHvX64RegisterRip);
245     vcxt.values[idx++].Reg64 = env->eip;
246
247     assert(whpx_register_names[idx] == WHvX64RegisterRflags);
248     vcxt.values[idx++].Reg64 = env->eflags;
249
250     /* Translate 6+4 segment registers. HV and QEMU order matches  */
251     assert(idx == WHvX64RegisterEs);
252     for (i = 0; i < 6; i += 1, idx += 1) {
253         vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
254     }
255
256     assert(idx == WHvX64RegisterLdtr);
257     vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
258
259     assert(idx == WHvX64RegisterTr);
260     vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
261
262     assert(idx == WHvX64RegisterIdtr);
263     vcxt.values[idx].Table.Base = env->idt.base;
264     vcxt.values[idx].Table.Limit = env->idt.limit;
265     idx += 1;
266
267     assert(idx == WHvX64RegisterGdtr);
268     vcxt.values[idx].Table.Base = env->gdt.base;
269     vcxt.values[idx].Table.Limit = env->gdt.limit;
270     idx += 1;
271
272     /* CR0, 2, 3, 4, 8 */
273     assert(whpx_register_names[idx] == WHvX64RegisterCr0);
274     vcxt.values[idx++].Reg64 = env->cr[0];
275     assert(whpx_register_names[idx] == WHvX64RegisterCr2);
276     vcxt.values[idx++].Reg64 = env->cr[2];
277     assert(whpx_register_names[idx] == WHvX64RegisterCr3);
278     vcxt.values[idx++].Reg64 = env->cr[3];
279     assert(whpx_register_names[idx] == WHvX64RegisterCr4);
280     vcxt.values[idx++].Reg64 = env->cr[4];
281     assert(whpx_register_names[idx] == WHvX64RegisterCr8);
282     vcxt.values[idx++].Reg64 = vcpu->tpr;
283
284     /* 8 Debug Registers - Skipped */
285
286     /* 16 XMM registers */
287     assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
288     for (i = 0; i < 16; i += 1, idx += 1) {
289         vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
290         vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
291     }
292
293     /* 8 FP registers */
294     assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
295     for (i = 0; i < 8; i += 1, idx += 1) {
296         vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
297         /* vcxt.values[idx].Fp.AsUINT128.High64 =
298                env->fpregs[i].mmx.MMX_Q(1);
299         */
300     }
301
302     /* FP control status register */
303     assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
304     vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
305     vcxt.values[idx].FpControlStatus.FpStatus =
306         (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
307     vcxt.values[idx].FpControlStatus.FpTag = 0;
308     for (i = 0; i < 8; ++i) {
309         vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
310     }
311     vcxt.values[idx].FpControlStatus.Reserved = 0;
312     vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
313     vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
314     idx += 1;
315
316     /* XMM control status register */
317     assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
318     vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
319     vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
320     vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
321     idx += 1;
322
323     /* MSRs */
324     assert(whpx_register_names[idx] == WHvX64RegisterTsc);
325     vcxt.values[idx++].Reg64 = env->tsc;
326     assert(whpx_register_names[idx] == WHvX64RegisterEfer);
327     vcxt.values[idx++].Reg64 = env->efer;
328 #ifdef TARGET_X86_64
329     assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
330     vcxt.values[idx++].Reg64 = env->kernelgsbase;
331 #endif
332
333     assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
334     vcxt.values[idx++].Reg64 = vcpu->apic_base;
335
336     /* WHvX64RegisterPat - Skipped */
337
338     assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
339     vcxt.values[idx++].Reg64 = env->sysenter_cs;
340     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
341     vcxt.values[idx++].Reg64 = env->sysenter_eip;
342     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
343     vcxt.values[idx++].Reg64 = env->sysenter_esp;
344     assert(whpx_register_names[idx] == WHvX64RegisterStar);
345     vcxt.values[idx++].Reg64 = env->star;
346 #ifdef TARGET_X86_64
347     assert(whpx_register_names[idx] == WHvX64RegisterLstar);
348     vcxt.values[idx++].Reg64 = env->lstar;
349     assert(whpx_register_names[idx] == WHvX64RegisterCstar);
350     vcxt.values[idx++].Reg64 = env->cstar;
351     assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
352     vcxt.values[idx++].Reg64 = env->fmask;
353 #endif
354
355     /* Interrupt / Event Registers - Skipped */
356
357     assert(idx == RTL_NUMBER_OF(whpx_register_names));
358
359     hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
360                                          whpx_register_names,
361                                          RTL_NUMBER_OF(whpx_register_names),
362                                          &vcxt.values[0]);
363
364     if (FAILED(hr)) {
365         error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
366                      hr);
367         __debugbreak();
368     }
369
370     return;
371 }
372
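/*
 * Mirror image of whpx_set_registers(): fetch the full register set from
 * the hypervisor and fold it back into the QEMU CPUX86State, updating the
 * cached TPR and APIC base only when they have changed.
 */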
373 static void whpx_get_registers(CPUState *cpu)
374 {
375     struct whpx_state *whpx = &whpx_global;
376     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
377     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
378     X86CPU *x86_cpu = X86_CPU(cpu);
379     struct whpx_register_set vcxt;
380     uint64_t tpr, apic_base;
381     HRESULT hr;
382     int idx = 0;
383     int i;
384
385     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
386
387     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
388                                          whpx_register_names,
389                                          RTL_NUMBER_OF(whpx_register_names),
390                                          &vcxt.values[0]);
391     if (FAILED(hr)) {
392         error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
393                      hr);
394         __debugbreak();
395     }
396
397     /* Indexes for first 16 registers match between HV and QEMU definitions */
398     for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
399         env->regs[idx] = vcxt.values[idx].Reg64;
400     }
401
402     /* Same goes for RIP and RFLAGS */
403     assert(whpx_register_names[idx] == WHvX64RegisterRip);
404     env->eip = vcxt.values[idx++].Reg64;
405     assert(whpx_register_names[idx] == WHvX64RegisterRflags);
406     env->eflags = vcxt.values[idx++].Reg64;
407
408     /* Translate 6+4 segment registers. HV and QEMU order matches  */
409     assert(idx == WHvX64RegisterEs);
410     for (i = 0; i < 6; i += 1, idx += 1) {
411         env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
412     }
413
414     assert(idx == WHvX64RegisterLdtr);
415     env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
416     assert(idx == WHvX64RegisterTr);
417     env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
418     assert(idx == WHvX64RegisterIdtr);
419     env->idt.base = vcxt.values[idx].Table.Base;
420     env->idt.limit = vcxt.values[idx].Table.Limit;
421     idx += 1;
422     assert(idx == WHvX64RegisterGdtr);
423     env->gdt.base = vcxt.values[idx].Table.Base;
424     env->gdt.limit = vcxt.values[idx].Table.Limit;
425     idx += 1;
426
427     /* CR0, 2, 3, 4, 8 */
428     assert(whpx_register_names[idx] == WHvX64RegisterCr0);
429     env->cr[0] = vcxt.values[idx++].Reg64;
430     assert(whpx_register_names[idx] == WHvX64RegisterCr2);
431     env->cr[2] = vcxt.values[idx++].Reg64;
432     assert(whpx_register_names[idx] == WHvX64RegisterCr3);
433     env->cr[3] = vcxt.values[idx++].Reg64;
434     assert(whpx_register_names[idx] == WHvX64RegisterCr4);
435     env->cr[4] = vcxt.values[idx++].Reg64;
436     assert(whpx_register_names[idx] == WHvX64RegisterCr8);
437     tpr = vcxt.values[idx++].Reg64;
438     if (tpr != vcpu->tpr) {
439         vcpu->tpr = tpr;
440         cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
441     }
442
443     /* 8 Debug Registers - Skipped */
444
445     /* 16 XMM registers */
446     assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
447     for (i = 0; i < 16; i += 1, idx += 1) {
448         env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
449         env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
450     }
451
452     /* 8 FP registers */
453     assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
454     for (i = 0; i < 8; i += 1, idx += 1) {
455         env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
456         /* env->fpregs[i].mmx.MMX_Q(1) =
457                vcxt.values[idx].Fp.AsUINT128.High64;
458         */
459     }
460
461     /* FP control status register */
462     assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
463     env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
464     env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
465     env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
466     for (i = 0; i < 8; ++i) {
467         env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
468     }
469     env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
470     env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
471     idx += 1;
472
473     /* XMM control status register */
474     assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
475     env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
476     idx += 1;
477
478     /* MSRs */
479     assert(whpx_register_names[idx] == WHvX64RegisterTsc);
480     env->tsc = vcxt.values[idx++].Reg64;
481     assert(whpx_register_names[idx] == WHvX64RegisterEfer);
482     env->efer = vcxt.values[idx++].Reg64;
483 #ifdef TARGET_X86_64
484     assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
485     env->kernelgsbase = vcxt.values[idx++].Reg64;
486 #endif
487
488     assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
489     apic_base = vcxt.values[idx++].Reg64;
490     if (apic_base != vcpu->apic_base) {
491         vcpu->apic_base = apic_base;
492         cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
493     }
494
495     /* WHvX64RegisterPat - Skipped */
496
497     assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
498     env->sysenter_cs = vcxt.values[idx++].Reg64;
499     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
500     env->sysenter_eip = vcxt.values[idx++].Reg64;
501     assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
502     env->sysenter_esp = vcxt.values[idx++].Reg64;
503     assert(whpx_register_names[idx] == WHvX64RegisterStar);
504     env->star = vcxt.values[idx++].Reg64;
505 #ifdef TARGET_X86_64
506     assert(whpx_register_names[idx] == WHvX64RegisterLstar);
507     env->lstar = vcxt.values[idx++].Reg64;
508     assert(whpx_register_names[idx] == WHvX64RegisterCstar);
509     env->cstar = vcxt.values[idx++].Reg64;
510     assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
511     env->fmask = vcxt.values[idx++].Reg64;
512 #endif
513
514     /* Interrupt / Event Registers - Skipped */
515
516     assert(idx == RTL_NUMBER_OF(whpx_register_names));
517
518     return;
519 }
520
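/*
 * Callbacks invoked by the WHV instruction emulator while it completes an
 * MMIO or port I/O instruction on our behalf: they route the access into
 * QEMU's I/O and physical memory address spaces and give the emulator
 * access to the VP registers and GVA->GPA translation.
 */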
521 static HRESULT CALLBACK whpx_emu_ioport_callback(
522     void *ctx,
523     WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
524 {
525     MemTxAttrs attrs = { 0 };
526     address_space_rw(&address_space_io, IoAccess->Port, attrs,
527                      (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
528                      IoAccess->Direction);
529     return S_OK;
530 }
531
532 static HRESULT CALLBACK whpx_emu_memio_callback(
533     void *ctx,
534     WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
535 {
536     cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
537                            ma->Direction);
538     return S_OK;
539 }
540
541 static HRESULT CALLBACK whpx_emu_getreg_callback(
542     void *ctx,
543     const WHV_REGISTER_NAME *RegisterNames,
544     UINT32 RegisterCount,
545     WHV_REGISTER_VALUE *RegisterValues)
546 {
547     HRESULT hr;
548     struct whpx_state *whpx = &whpx_global;
549     CPUState *cpu = (CPUState *)ctx;
550
551     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
552                                          RegisterNames, RegisterCount,
553                                          RegisterValues);
554     if (FAILED(hr)) {
555         error_report("WHPX: Failed to get virtual processor registers,"
556                      " hr=%08lx", hr);
557         __debugbreak();
558     }
559
560     return hr;
561 }
562
563 static HRESULT CALLBACK whpx_emu_setreg_callback(
564     void *ctx,
565     const WHV_REGISTER_NAME *RegisterNames,
566     UINT32 RegisterCount,
567     const WHV_REGISTER_VALUE *RegisterValues)
568 {
569     HRESULT hr;
570     struct whpx_state *whpx = &whpx_global;
571     CPUState *cpu = (CPUState *)ctx;
572
573     hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
574                                          RegisterNames, RegisterCount,
575                                          RegisterValues);
576     if (FAILED(hr)) {
577         error_report("WHPX: Failed to set virtual processor registers,"
578                      " hr=%08lx", hr);
579         __debugbreak();
580     }
581
582     /*
583      * The emulator just successfully wrote the register state. We clear the
584      * dirty state so we avoid the double write on resume of the VP.
585      */
586     cpu->vcpu_dirty = false;
587
588     return hr;
589 }
590
591 static HRESULT CALLBACK whpx_emu_translate_callback(
592     void *ctx,
593     WHV_GUEST_VIRTUAL_ADDRESS Gva,
594     WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
595     WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
596     WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
597 {
598     HRESULT hr;
599     struct whpx_state *whpx = &whpx_global;
600     CPUState *cpu = (CPUState *)ctx;
601     WHV_TRANSLATE_GVA_RESULT res;
602
603     hr = WHvTranslateGva(whpx->partition, cpu->cpu_index,
604                          Gva, TranslateFlags, &res, Gpa);
605     if (FAILED(hr)) {
606         error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
607         __debugbreak();
608     } else {
609         *TranslationResult = res.ResultCode;
610     }
611
612     return hr;
613 }
614
615 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
616     .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
617     .WHvEmulatorMemoryCallback = whpx_emu_memio_callback,
618     .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
619     .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
620     .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
621 };
622
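/*
 * Exit handlers for memory-mapped and port I/O: hand the faulting
 * instruction to the WHV emulator, which calls back into the callbacks
 * above to perform the actual access.
 */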
623 static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
624 {
625     HRESULT hr;
626     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
627     WHV_EMULATOR_STATUS emu_status;
628
629     hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu, ctx, &emu_status);
630     if (FAILED(hr)) {
631         __debugbreak();
632         error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
633         return -1;
634     }
635
636     if (!emu_status.EmulationSuccessful) {
637         __debugbreak();
638         error_report("WHPX: Failed to emulate MMIO access");
639         return -1;
640     }
641
642     return 0;
643 }
644
645 static int whpx_handle_portio(CPUState *cpu,
646                               WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
647 {
648     HRESULT hr;
649     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
650     WHV_EMULATOR_STATUS emu_status;
651
652     hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu, ctx, &emu_status);
653     if (FAILED(hr)) {
654         __debugbreak();
655         error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
656         return -1;
657     }
658
659     if (!emu_status.EmulationSuccessful) {
660         __debugbreak();
661         error_report("WHPX: Failed to emulate PortMMIO access");
662         return -1;
663     }
664
665     return 0;
666 }
667
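/*
 * Handle a HLT exit: unless an unmasked external interrupt or an NMI is
 * already pending, mark the vcpu as halted and return 1 so the run loop
 * drops back to the main loop with EXCP_HLT.
 */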
668 static int whpx_handle_halt(CPUState *cpu)
669 {
670     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
671     int ret = 0;
672
673     qemu_mutex_lock_iothread();
674     if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
675           (env->eflags & IF_MASK)) &&
676         !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
677         cpu->exception_index = EXCP_HLT;
678         cpu->halted = true;
679         ret = 1;
680     }
681     qemu_mutex_unlock_iothread();
682
683     return ret;
684 }
685
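/*
 * Prepare the vcpu before it is resumed: inject a pending NMI or external
 * interrupt through WHvRegisterPendingInterruption, push a changed TPR
 * value into CR8, and request an interrupt-window notification when a hard
 * interrupt remains pending so we get an exit as soon as it can be
 * delivered.
 */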
686 static void whpx_vcpu_pre_run(CPUState *cpu)
687 {
688     HRESULT hr;
689     struct whpx_state *whpx = &whpx_global;
690     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
691     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
692     X86CPU *x86_cpu = X86_CPU(cpu);
693     int irq;
694     WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
695     UINT32 reg_count = 0;
696     WHV_REGISTER_VALUE reg_values[3] = {0};
697     WHV_REGISTER_NAME reg_names[3];
698
699     qemu_mutex_lock_iothread();
700
701     /* Inject NMI */
702     if (!vcpu->interrupt_in_flight.InterruptionPending &&
703         cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
704         if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
705             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
706             vcpu->interruptable = false;
707             new_int.InterruptionType = WHvX64PendingNmi;
708             new_int.InterruptionPending = 1;
709             new_int.InterruptionVector = 2;
710         }
711         if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
712             /* The iothread lock is already held at this point */
713             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
714             __debugbreak();
716         }
717     }
718
719     /*
720      * Force the VCPU out of its inner loop to process any INIT requests or
721      * commit pending TPR access.
722      */
723     if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
724         if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
725             !(env->hflags & HF_SMM_MASK)) {
726             cpu->exit_request = 1;
727         }
728         if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
729             cpu->exit_request = 1;
730         }
731     }
732
733     /* Get pending hard interruption or replay one that was overwritten */
734     if (!vcpu->interrupt_in_flight.InterruptionPending &&
735         vcpu->interruptable && (env->eflags & IF_MASK)) {
736         assert(!new_int.InterruptionPending);
737         if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
738             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
739             irq = cpu_get_pic_interrupt(env);
740             if (irq >= 0) {
741                 new_int.InterruptionType = WHvX64PendingInterrupt;
742                 new_int.InterruptionPending = 1;
743                 new_int.InterruptionVector = irq;
744             }
745         }
746     }
747
748     /* Set up the interrupt state if a new one was prepared */
749     if (new_int.InterruptionPending) {
750         reg_values[reg_count].PendingInterruption = new_int;
751         reg_names[reg_count] = WHvRegisterPendingInterruption;
752         reg_count += 1;
753     }
754
755     /* Sync the TPR into CR8 if it was modified during the intercept */
756     reg_values[reg_count].Reg64 = cpu_get_apic_tpr(x86_cpu->apic_state);
757     if (reg_values[reg_count].Reg64 != vcpu->tpr) {
758         vcpu->tpr = reg_values[reg_count].Reg64;
759         cpu->exit_request = 1;
760         reg_names[reg_count] = WHvX64RegisterCr8;
761         reg_count += 1;
762     }
763
764     /* Update the state of the interrupt delivery notification */
765     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
766         reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
767             = 1;
768         if (vcpu->window_registered != 1) {
769             vcpu->window_registered = 1;
770         }
771         reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
772         reg_count += 1;
773     }
774
775     qemu_mutex_unlock_iothread();
776
777     if (reg_count) {
778         hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
779                                              reg_names, reg_count, reg_values);
780         if (FAILED(hr)) {
781             error_report("WHPX: Failed to set interrupt state registers,"
782                          " hr=%08lx", hr);
783             __debugbreak();
784         }
785     }
786
787     return;
788 }
789
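/*
 * After an exit, read back the state that can change while the vcpu runs:
 * RFLAGS, CR8 (reflected into the APIC TPR), the still-pending
 * interruption, and the interrupt-shadow state that gates further
 * injection.
 */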
790 static void whpx_vcpu_post_run(CPUState *cpu)
791 {
792     HRESULT hr;
793     struct whpx_state *whpx = &whpx_global;
794     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
795     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
796     X86CPU *x86_cpu = X86_CPU(cpu);
797     WHV_REGISTER_VALUE reg_values[4];
798     const WHV_REGISTER_NAME reg_names[4] = {
799         WHvX64RegisterRflags,
800         WHvX64RegisterCr8,
801         WHvRegisterPendingInterruption,
802         WHvRegisterInterruptState,
803     };
804
805     hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
806                                          reg_names, 4, reg_values);
807     if (FAILED(hr)) {
808         error_report("WHPX: Failed to get interrupt state regusters,"
809                      " hr=%08lx", hr);
810         __debugbreak();
811         vcpu->interruptable = false;
812         return;
813     }
814
815     assert(reg_names[0] == WHvX64RegisterRflags);
816     env->eflags = reg_values[0].Reg64;
817
818     assert(reg_names[1] == WHvX64RegisterCr8);
819     if (vcpu->tpr != reg_values[1].Reg64) {
820         vcpu->tpr = reg_values[1].Reg64;
821         qemu_mutex_lock_iothread();
822         cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
823         qemu_mutex_unlock_iothread();
824     }
825
826     assert(reg_names[2] == WHvRegisterPendingInterruption);
827     vcpu->interrupt_in_flight = reg_values[2].PendingInterruption;
828
829     assert(reg_names[3] == WHvRegisterInterruptState);
830     vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow;
831
832     return;
833 }
834
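/*
 * Handle requests that are serviced outside the hypervisor run loop:
 * INIT and SIPI resets, APIC poll requests, reported TPR accesses, and
 * waking a halted vcpu when an interrupt or NMI becomes deliverable.
 */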
835 static void whpx_vcpu_process_async_events(CPUState *cpu)
836 {
837     struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
838     X86CPU *x86_cpu = X86_CPU(cpu);
839     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
840
841     if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
842         !(env->hflags & HF_SMM_MASK)) {
843
844         do_cpu_init(x86_cpu);
845         cpu->vcpu_dirty = true;
846         vcpu->interruptable = true;
847     }
848
849     if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
850         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
851         apic_poll_irq(x86_cpu->apic_state);
852     }
853
854     if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
855          (env->eflags & IF_MASK)) ||
856         (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
857         cpu->halted = false;
858     }
859
860     if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
861         if (!cpu->vcpu_dirty) {
862             whpx_get_registers(cpu);
863         }
864         do_cpu_sipi(x86_cpu);
865     }
866
867     if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
868         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
869         if (!cpu->vcpu_dirty) {
870             whpx_get_registers(cpu);
871         }
872         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
873                                       env->tpr_access_type);
874     }
875
876     return;
877 }
878
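/*
 * Inner run loop for one vcpu: flush dirty register state, perform the
 * pre-run interrupt bookkeeping, enter WHvRunVirtualProcessor() and then
 * dispatch on the exit reason. The loop keeps running until a handler
 * returns non-zero (the guest needs servicing in the main loop) or an
 * error occurs.
 */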
879 static int whpx_vcpu_run(CPUState *cpu)
880 {
881     HRESULT hr;
882     struct whpx_state *whpx = &whpx_global;
883     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
884     int ret;
885
886     whpx_vcpu_process_async_events(cpu);
887     if (cpu->halted) {
888         cpu->exception_index = EXCP_HLT;
889         atomic_set(&cpu->exit_request, false);
890         return 0;
891     }
892
893     qemu_mutex_unlock_iothread();
894     cpu_exec_start(cpu);
895
896     do {
897         if (cpu->vcpu_dirty) {
898             whpx_set_registers(cpu);
899             cpu->vcpu_dirty = false;
900         }
901
902         whpx_vcpu_pre_run(cpu);
903
904         if (atomic_read(&cpu->exit_request)) {
905             whpx_vcpu_kick(cpu);
906         }
907
908         for (;;) {
909             hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index,
910                                         &vcpu->exit_ctx, whpx->exit_ctx_size);
911
912             if (SUCCEEDED(hr) && (vcpu->exit_ctx.ExitReason ==
913                                   WHvRunVpExitReasonAlerted)) {
914                 WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index,
915                                              0);
916             } else {
917                 break;
918             }
919         }
920
921         if (FAILED(hr)) {
922             error_report("WHPX: Failed to exec a virtual processor,"
923                          " hr=%08lx", hr);
924             ret = -1;
925             break;
926         }
927
928         whpx_vcpu_post_run(cpu);
929
930         switch (vcpu->exit_ctx.ExitReason) {
931         case WHvRunVpExitReasonMemoryAccess:
932             ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
933             break;
934
935         case WHvRunVpExitReasonX64IoPortAccess:
936             ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
937             break;
938
939         case WHvRunVpExitReasonX64InterruptWindow:
940             vcpu->window_registered = 0;
941             break;
942
943         case WHvRunVpExitReasonX64Halt:
944             ret = whpx_handle_halt(cpu);
945             break;
946
947         case WHvRunVpExitReasonCanceled:
948             cpu->exception_index = EXCP_INTERRUPT;
949             ret = 1;
950             break;
951
952         case WHvRunVpExitReasonNone:
953         case WHvRunVpExitReasonUnrecoverableException:
954         case WHvRunVpExitReasonInvalidVpRegisterValue:
955         case WHvRunVpExitReasonUnsupportedFeature:
956         case WHvRunVpExitReasonX64MsrAccess:
957         case WHvRunVpExitReasonX64Cpuid:
958         case WHvRunVpExitReasonException:
959         case WHvRunVpExitReasonAlerted:
960         default:
961             error_report("WHPX: Unexpected VP exit code %d",
962                          vcpu->exit_ctx.ExitReason);
963             whpx_get_registers(cpu);
964             qemu_mutex_lock_iothread();
965             qemu_system_guest_panicked(cpu_get_crash_info(cpu));
966             qemu_mutex_unlock_iothread();
967             break;
968         }
969
970     } while (!ret);
971
972     cpu_exec_end(cpu);
973     qemu_mutex_lock_iothread();
974     current_cpu = cpu;
975
976     atomic_set(&cpu->exit_request, false);
977
978     return ret < 0;
979 }
980
981 static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
982 {
983     whpx_get_registers(cpu);
984     cpu->vcpu_dirty = true;
985 }
986
987 static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
988                                                run_on_cpu_data arg)
989 {
990     whpx_set_registers(cpu);
991     cpu->vcpu_dirty = false;
992 }
993
994 static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
995                                               run_on_cpu_data arg)
996 {
997     whpx_set_registers(cpu);
998     cpu->vcpu_dirty = false;
999 }
1000
1001 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
1002                                                run_on_cpu_data arg)
1003 {
1004     cpu->vcpu_dirty = true;
1005 }
1006
1007 /*
1008  * CPU support.
1009  */
1010
1011 void whpx_cpu_synchronize_state(CPUState *cpu)
1012 {
1013     if (!cpu->vcpu_dirty) {
1014         run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
1015     }
1016 }
1017
1018 void whpx_cpu_synchronize_post_reset(CPUState *cpu)
1019 {
1020     run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1021 }
1022
1023 void whpx_cpu_synchronize_post_init(CPUState *cpu)
1024 {
1025     run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1026 }
1027
1028 void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
1029 {
1030     run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1031 }
1032
1033 /*
1034  * Vcpu support.
1035  */
1036
1037 static Error *whpx_migration_blocker;
1038
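/*
 * Create the per-vcpu emulator instance and the hypervisor virtual
 * processor, and register a migration blocker the first time through,
 * since WHPX does not yet expose the state needed for live migration.
 */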
1039 int whpx_init_vcpu(CPUState *cpu)
1040 {
1041     HRESULT hr;
1042     struct whpx_state *whpx = &whpx_global;
1043     struct whpx_vcpu *vcpu;
1044     Error *local_error = NULL;
1045
1046     /* Add migration blockers for all unsupported features of the
1047      * Windows Hypervisor Platform
1048      */
1049     if (whpx_migration_blocker == NULL) {
1050         error_setg(&whpx_migration_blocker,
1051                "State blocked due to non-migratable CPUID feature support,"
1052                "dirty memory tracking support, and XSAVE/XRSTOR support");
1053
1054         (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1055         if (local_error) {
1056             error_report_err(local_error);
1057             error_free(whpx_migration_blocker);
1058             migrate_del_blocker(whpx_migration_blocker);
1059             return -EINVAL;
1060         }
1061     }
1062
1063     vcpu = g_malloc0(FIELD_OFFSET(struct whpx_vcpu, exit_ctx) +
1064                      whpx->exit_ctx_size);
1065
1066     if (!vcpu) {
1067         error_report("WHPX: Failed to allocte VCPU context.");
1068         return -ENOMEM;
1069     }
1070
1071     hr = WHvEmulatorCreateEmulator(whpx_emu_callbacks, &vcpu->emulator);
1072     if (FAILED(hr)) {
1073         error_report("WHPX: Failed to setup instruction completion support,"
1074                      " hr=%08lx", hr);
1075         g_free(vcpu);
1076         return -EINVAL;
1077     }
1078
1079     hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1080     if (FAILED(hr)) {
1081         error_report("WHPX: Failed to create a virtual processor,"
1082                      " hr=%08lx", hr);
1083         WHvEmulatorDestroyEmulator(vcpu->emulator);
1084         g_free(vcpu);
1085         return -EINVAL;
1086     }
1087
1088     vcpu->interruptable = true;
1089
1090     cpu->vcpu_dirty = true;
1091     cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1092
1093     return 0;
1094 }
1095
1096 int whpx_vcpu_exec(CPUState *cpu)
1097 {
1098     int ret;
1099     int fatal;
1100
1101     for (;;) {
1102         if (cpu->exception_index >= EXCP_INTERRUPT) {
1103             ret = cpu->exception_index;
1104             cpu->exception_index = -1;
1105             break;
1106         }
1107
1108         fatal = whpx_vcpu_run(cpu);
1109
1110         if (fatal) {
1111             error_report("WHPX: Failed to exec a virtual processor");
1112             abort();
1113         }
1114     }
1115
1116     return ret;
1117 }
1118
1119 void whpx_destroy_vcpu(CPUState *cpu)
1120 {
1121     struct whpx_state *whpx = &whpx_global;
1122     struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
1123
1124     WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
1125     WHvEmulatorDestroyEmulator(vcpu->emulator);
1126     g_free(cpu->hax_vcpu);
1127     return;
1128 }
1129
1130 void whpx_vcpu_kick(CPUState *cpu)
1131 {
1132     struct whpx_state *whpx = &whpx_global;
1133     WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1134 }
1135
1136 /*
1137  * Memory support.
1138  */
1139
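/*
 * Map or unmap one guest physical range in the partition. ROM regions are
 * mapped read/execute only; RAM also gets write access. Failures are
 * reported but not treated as fatal here.
 */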
1140 static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
1141                                 void *host_va, int add, int rom,
1142                                 const char *name)
1143 {
1144     struct whpx_state *whpx = &whpx_global;
1145     HRESULT hr;
1146
1147     /*
1148     if (add) {
1149         printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1150                (void*)start_pa, (void*)size, host_va,
1151                (rom ? "ROM" : "RAM"), name);
1152     } else {
1153         printf("WHPX: DEL PA:%p Size:%p, Host:%p,      '%s'\n",
1154                (void*)start_pa, (void*)size, host_va, name);
1155     }
1156     */
1157
1158     if (add) {
1159         hr = WHvMapGpaRange(whpx->partition,
1160                             host_va,
1161                             start_pa,
1162                             size,
1163                             (WHvMapGpaRangeFlagRead |
1164                              WHvMapGpaRangeFlagExecute |
1165                              (rom ? 0 : WHvMapGpaRangeFlagWrite)));
1166     } else {
1167         hr = WHvUnmapGpaRange(whpx->partition,
1168                               start_pa,
1169                               size);
1170     }
1171
1172     if (FAILED(hr)) {
1173         error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1174                      " Host:%p, hr=%08lx",
1175                      (add ? "MAP" : "UNMAP"), name,
1176                      (void *)start_pa, (void *)size, host_va, hr);
1177     }
1178 }
1179
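/*
 * Clamp a memory region section to host-page-aligned boundaries before
 * mapping it; sub-page leftovers are silently skipped since the hypervisor
 * can only map whole pages.
 */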
1180 static void whpx_process_section(MemoryRegionSection *section, int add)
1181 {
1182     MemoryRegion *mr = section->mr;
1183     hwaddr start_pa = section->offset_within_address_space;
1184     ram_addr_t size = int128_get64(section->size);
1185     unsigned int delta;
1186     uint64_t host_va;
1187
1188     if (!memory_region_is_ram(mr)) {
1189         return;
1190     }
1191
1192     delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
1193     delta &= ~qemu_real_host_page_mask;
1194     if (delta > size) {
1195         return;
1196     }
1197     start_pa += delta;
1198     size -= delta;
1199     size &= qemu_real_host_page_mask;
1200     if (!size || (start_pa & ~qemu_real_host_page_mask)) {
1201         return;
1202     }
1203
1204     host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
1205             + section->offset_within_region + delta;
1206
1207     whpx_update_mapping(start_pa, size, (void *)host_va, add,
1208                        memory_region_is_rom(mr), mr->name);
1209 }
1210
1211 static void whpx_region_add(MemoryListener *listener,
1212                            MemoryRegionSection *section)
1213 {
1214     memory_region_ref(section->mr);
1215     whpx_process_section(section, 1);
1216 }
1217
1218 static void whpx_region_del(MemoryListener *listener,
1219                            MemoryRegionSection *section)
1220 {
1221     whpx_process_section(section, 0);
1222     memory_region_unref(section->mr);
1223 }
1224
1225 static void whpx_transaction_begin(MemoryListener *listener)
1226 {
1227 }
1228
1229 static void whpx_transaction_commit(MemoryListener *listener)
1230 {
1231 }
1232
1233 static void whpx_log_sync(MemoryListener *listener,
1234                          MemoryRegionSection *section)
1235 {
1236     MemoryRegion *mr = section->mr;
1237
1238     if (!memory_region_is_ram(mr)) {
1239         return;
1240     }
1241
1242     memory_region_set_dirty(mr, 0, int128_get64(section->size));
1243 }
1244
1245 static MemoryListener whpx_memory_listener = {
1246     .begin = whpx_transaction_begin,
1247     .commit = whpx_transaction_commit,
1248     .region_add = whpx_region_add,
1249     .region_del = whpx_region_del,
1250     .log_sync = whpx_log_sync,
1251     .priority = 10,
1252 };
1253
1254 static void whpx_memory_init(void)
1255 {
1256     memory_listener_register(&whpx_memory_listener, &address_space_memory);
1257 }
1258
1259 static void whpx_handle_interrupt(CPUState *cpu, int mask)
1260 {
1261     cpu->interrupt_request |= mask;
1262
1263     if (!qemu_cpu_is_self(cpu)) {
1264         qemu_cpu_kick(cpu);
1265     }
1266 }
1267
1268 /*
1269  * Partition support
1270  */
1271
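/*
 * Accelerator bring-up: verify the Windows Hypervisor Platform is present,
 * create and configure a partition sized for smp_cpus processors, register
 * the memory listener and take over the CPU interrupt handler.
 */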
1272 static int whpx_accel_init(MachineState *ms)
1273 {
1274     struct whpx_state *whpx;
1275     int ret;
1276     HRESULT hr;
1277     WHV_CAPABILITY whpx_cap;
1278     WHV_PARTITION_PROPERTY prop;
1279
1280     whpx = &whpx_global;
1281
1282     memset(whpx, 0, sizeof(struct whpx_state));
1283     whpx->mem_quota = ms->ram_size;
1284
1285     hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1286                           sizeof(whpx_cap));
1287     if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1288         error_report("WHPX: No accelerator found, hr=%08lx", hr);
1289         ret = -ENOSPC;
1290         goto error;
1291     }
1292
1293     hr = WHvCreatePartition(&whpx->partition);
1294     if (FAILED(hr)) {
1295         error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1296         ret = -EINVAL;
1297         goto error;
1298     }
1299
1300     memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1301     prop.PropertyCode = WHvPartitionPropertyCodeProcessorCount;
1302     prop.ProcessorCount = smp_cpus;
1303     hr = WHvSetPartitionProperty(whpx->partition,
1304                                  &prop,
1305                                  sizeof(WHV_PARTITION_PROPERTY));
1306
1307     if (FAILED(hr)) {
1308         error_report("WHPX: Failed to set partition core count to %d,"
1309                      " hr=%08lx", smp_cores, hr);
1310         ret = -EINVAL;
1311         goto error;
1312     }
1313
1314     hr = WHvSetupPartition(whpx->partition);
1315     if (FAILED(hr)) {
1316         error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1317         ret = -EINVAL;
1318         goto error;
1319     }
1320
1321     whpx->exit_ctx_size = WHvGetRunExitContextSize();
1322     assert(whpx->exit_ctx_size);
1323
1324     whpx_memory_init();
1325
1326     cpu_interrupt_handler = whpx_handle_interrupt;
1327
1328     printf("Windows Hypervisor Platform accelerator is operational\n");
1329     return 0;
1330
1331   error:
1332
1333     if (NULL != whpx->partition) {
1334         WHvDeletePartition(whpx->partition);
1335         whpx->partition = NULL;
1336     }
1337
1338
1339     return ret;
1340 }
1341
1342 int whpx_enabled(void)
1343 {
1344     return whpx_allowed;
1345 }
1346
1347 static void whpx_accel_class_init(ObjectClass *oc, void *data)
1348 {
1349     AccelClass *ac = ACCEL_CLASS(oc);
1350     ac->name = "WHPX";
1351     ac->init_machine = whpx_accel_init;
1352     ac->allowed = &whpx_allowed;
1353 }
1354
1355 static const TypeInfo whpx_accel_type = {
1356     .name = ACCEL_CLASS_NAME("whpx"),
1357     .parent = TYPE_ACCEL,
1358     .class_init = whpx_accel_class_init,
1359 };
1360
1361 static void whpx_type_init(void)
1362 {
1363     type_register_static(&whpx_accel_type);
1364 }
1365
1366 type_init(whpx_type_init);