/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "host-utils.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif
#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
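
/* MSR numbers for the kvmclock paravirtual clock: the guest writes a
 * guest-physical address to these MSRs to request wall-clock and
 * system-time updates from the host.  Defined locally in case the
 * kernel headers in use do not export them. */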
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
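
/* Fetch KVM's supported-CPUID table.  The ioctl fills at most 'max'
 * entries; if the table did not fit, the buffer is freed and NULL is
 * returned so the caller can retry with a larger 'max'. */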
#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
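
/* Look up one register of one CPUID function in KVM's supported-CPUID
 * table, growing the buffer until the whole table fits. */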
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                if (function == 1) {
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                } else if (function == 0x80000001) {
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}
#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif
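
/* Clear every feature bit the guest was configured with that the host-side
 * KVM cannot virtualize, so the guest never sees unsupported CPUID bits. */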
static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}
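
/* Map each paravirtual feature bit we may advertise to the guest onto the
 * KVM capability that tells us whether the host actually implements it. */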
#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
#ifdef KVM_CAP_CR3_CACHE
    { KVM_CAP_CR3_CACHE, KVM_FEATURE_CR3_CACHE },
#endif
    { -1, -1 }
};
static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }
    return features;
}
#endif
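
/* Build the CPUID table the guest will see and hand it to KVM.  Features
 * are first trimmed to what the host supports; when paravirtual support is
 * compiled in, the KVM signature and feature leaves (the 0x40000000 range)
 * are added ahead of the standard and extended leaves. */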
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif
    env->mp_state = KVM_MP_STATE_RUNNABLE;

    kvm_trim_features(&env->cpuid_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_EDX));

    /* The hypervisor bit is reserved for software, so keep it as
     * configured rather than letting the trim clear it. */
    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    kvm_trim_features(&env->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(env, 1, R_ECX));
    env->cpuid_ext_features |= i;

    kvm_trim_features(&env->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX));
    kvm_trim_features(&env->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX));

    cpuid_i = 0;
#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif
    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 until all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                /* sic: spelling matches the kernel ABI header */
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
}
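
/* Probe once whether KVM can save/restore MSR_STAR for us.  The MSR index
 * list has to be sized correctly: a first KVM_GET_MSR_INDEX_LIST call with
 * nmsrs == 0 fails with E2BIG but reports the required entry count. */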
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return 0;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}
int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it. Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this properly.
     */
    /*
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }

    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}
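
/* In vm86 mode only selector/base/limit carry state; the remaining
 * attribute fields are forced to the fixed values that vm86 segments
 * architecturally imply (writable data segment, present, DPL 3). */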
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
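
/* Translate between QEMU's SegmentCache, which keeps the attribute bits
 * packed in 'flags' using the hardware descriptor layout (DESC_*_MASK),
 * and KVM's kvm_segment, which stores each attribute in its own field. */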
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
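
/* Copy one register between a kvm_regs field and the QEMU CPU state, in
 * the direction selected by 'set' (non-zero: QEMU to KVM).  For example,
 * kvm_getput_reg(&regs.rax, &env->regs[R_EAX], 1) stores RAX into the
 * kvm_regs buffer. */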
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
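
/* The x87 status word keeps TOP (the register-stack pointer) in bits
 * 11-13, so fsw is rebuilt from env->fpus with env->fpstt spliced in;
 * the abridged tag word (ftwx) uses one bit per register, set when the
 * register is valid (QEMU's fptags uses the opposite sense). */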
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
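
/* KVM expects pending-interrupt state as a bitmap of vectors; QEMU tracks
 * at most one pending vector (env->interrupt_injected), so set just that
 * bit when one is recorded. */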
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                    (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
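
/* KVM_SET_MSRS/KVM_GET_MSRS take a kvm_msrs header immediately followed
 * by the entry array, so an anonymous struct provides the contiguous
 * buffer both ioctls expect. */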
static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr);
    kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);
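
    /* Recompute the cached hflags from the segment, control-register and
     * EFER state just read back from KVM, rather than trusting the
     * previously cached bits. */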
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
            (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
            (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;
    /* On success, KVM_GET_MSRS returns the number of entries it filled. */
    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}
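
/* Pending exception/interrupt/NMI state travels via KVM_SET_VCPU_EVENTS
 * and KVM_GET_VCPU_EVENTS when the kernel advertises KVM_CAP_VCPU_EVENTS;
 * otherwise these helpers are no-ops. */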
static int kvm_put_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags =
        KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}
static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}
int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_vcpu_events(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;
            intr.irq = irq;
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
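/* Software breakpoints: save the original byte at bp->pc and patch in the
 * one-byte int3 (0xcc) trap instruction; removal verifies the 0xcc is
 * still in place before restoring the saved byte. */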
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            /* Watchpoints must be naturally aligned to their length. */
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
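
/* Classify a debug exception reported by the guest: DR6 bit 14 (BS) flags
 * a single-step trap, bits 0-3 identify which hardware breakpoint fired,
 * and the corresponding DR7 type field says whether it was an execute,
 * write, or read/write breakpoint. */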
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}
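
/* Program the debug registers KVM will load into the guest: DR0-DR3 hold
 * the breakpoint addresses, and for each slot n DR7 gets a global-enable
 * bit plus 2-bit type and length fields at bit positions 16 + n*4 and
 * 18 + n*4 respectively. */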
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */