#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
-
+#include "gdbstub.h"
+#include "host-utils.h"
+#include "hw/pc.h"
+#include "hw/apic.h"
+#include "ioport.h"
+#include "kvm_x86.h"
+
+#ifdef CONFIG_KVM_PARA
+#include <linux/kvm_para.h>
+#endif
+//
//#define DEBUG_KVM
#ifdef DEBUG_KVM
-#define dprintf(fmt, ...) \
+#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
-#define dprintf(fmt, ...) \
+#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#ifndef BUS_MCEERR_AR
+#define BUS_MCEERR_AR 4
+#endif
+#ifndef BUS_MCEERR_AO
+#define BUS_MCEERR_AO 5
+#endif
+
+#ifdef KVM_CAP_EXT_CPUID
+
+static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
+ cpuid->nent = max;
+ r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
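+ /* A completely filled table may have been truncated; treat it as -E2BIG so the caller retries with a larger buffer */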
+ if (r == 0 && cpuid->nent >= max) {
+ r = -E2BIG;
+ }
+ if (r < 0) {
+ if (r == -E2BIG) {
+ qemu_free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
+ uint32_t index, int reg)
+{
+ struct kvm_cpuid2 *cpuid;
+ int i, max;
+ uint32_t ret = 0;
+ uint32_t cpuid_1_edx;
+
+ if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
+ return -1U;
+ }
+
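+ /* start with a single entry and double the buffer until the full supported-CPUID list fits */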
+ max = 1;
+ while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
+ max *= 2;
+ }
+
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ switch (reg) {
+ case R_EAX:
+ ret = cpuid->entries[i].eax;
+ break;
+ case R_EBX:
+ ret = cpuid->entries[i].ebx;
+ break;
+ case R_ECX:
+ ret = cpuid->entries[i].ecx;
+ break;
+ case R_EDX:
+ ret = cpuid->entries[i].edx;
+ switch (function) {
+ case 1:
+ /* KVM before 2.6.30 misreports the following features */
+ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+ break;
+ case 0x80000001:
+ /* On Intel, kvm returns cpuid according to the Intel spec,
+ * so add missing bits according to the AMD spec:
+ */
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
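+ /* the mask covers the leaf-1 EDX feature bits that AMD also defines in leaf 0x80000001 EDX */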
+ ret |= cpuid_1_edx & 0x183f7ff;
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ qemu_free(cpuid);
+
+ return ret;
+}
+
+#else
+
+uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
+ uint32_t index, int reg)
+{
+ return -1U;
+}
+
+#endif
+
+#ifdef CONFIG_KVM_PARA
+struct kvm_para_features {
+ int cap;
+ int feature;
+} para_features[] = {
+#ifdef KVM_CAP_CLOCKSOURCE
+ { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
+#endif
+#ifdef KVM_CAP_NOP_IO_DELAY
+ { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
+#endif
+#ifdef KVM_CAP_PV_MMU
+ { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
+#endif
+ { -1, -1 }
+};
+
+static int get_para_features(CPUState *env)
+{
+ int i, features = 0;
+
+ for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
+ if (kvm_check_extension(env->kvm_state, para_features[i].cap))
+ features |= (1 << para_features[i].feature);
+ }
+
+ return features;
+}
+#endif
+
+#ifdef KVM_CAP_MCE
+static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
+ int *max_banks)
+{
+ int r;
+
+ r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
+ if (r > 0) {
+ *max_banks = r;
+ return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
+ }
+ return -ENOSYS;
+}
+
+static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
+{
+ return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
+}
+
+static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
+{
+ return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
+}
+
+static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
+{
+ struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
+ int r;
+
+ kmsrs->nmsrs = n;
+ memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
+ r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
+ memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
+ free(kmsrs);
+ return r;
+}
+
+/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
+static int kvm_mce_in_exception(CPUState *env)
+{
+ struct kvm_msr_entry msr_mcg_status = {
+ .index = MSR_MCG_STATUS,
+ };
+ int r;
+
+ r = kvm_get_msr(env, &msr_mcg_status, 1);
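+ /* kvm_get_msr() returns the number of MSRs read; anything other than 1 is a failure here */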
+ if (r == -1 || r == 0) {
+ return -1;
+ }
+ return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
+}
+
+struct kvm_x86_mce_data
+{
+ CPUState *env;
+ struct kvm_x86_mce *mce;
+ int abort_on_error;
+};
+
+static void kvm_do_inject_x86_mce(void *_data)
+{
+ struct kvm_x86_mce_data *data = _data;
+ int r;
+
+ /* If there is an MCE exception being processed, ignore this SRAO MCE */
+ if ((data->env->mcg_cap & MCG_SER_P) &&
+ !(data->mce->status & MCI_STATUS_AR)) {
+ r = kvm_mce_in_exception(data->env);
+ if (r == -1) {
+ fprintf(stderr, "Failed to get MCE status\n");
+ } else if (r) {
+ return;
+ }
+ }
+
+ r = kvm_set_mce(data->env, data->mce);
+ if (r < 0) {
+ perror("kvm_set_mce FAILED");
+ if (data->abort_on_error) {
+ abort();
+ }
+ }
+}
+#endif
+
+void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
+ uint64_t mcg_status, uint64_t addr, uint64_t misc,
+ int abort_on_error)
+{
+#ifdef KVM_CAP_MCE
+ struct kvm_x86_mce mce = {
+ .bank = bank,
+ .status = status,
+ .mcg_status = mcg_status,
+ .addr = addr,
+ .misc = misc,
+ };
+ struct kvm_x86_mce_data data = {
+ .env = cenv,
+ .mce = &mce,
+ };
+
+ if (!cenv->mcg_cap) {
+ fprintf(stderr, "MCE support is not enabled!\n");
+ return;
+ }
+
+ run_on_cpu(cenv, kvm_do_inject_x86_mce, &data);
+#else
+ if (abort_on_error)
+ abort();
+#endif
+}
+
int kvm_arch_init_vcpu(CPUState *env)
{
struct {
- struct kvm_cpuid cpuid;
- struct kvm_cpuid_entry entries[100];
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[100];
} __attribute__((packed)) cpuid_data;
- uint32_t limit, i, cpuid_i;
- uint32_t eax, ebx, ecx, edx;
+ uint32_t limit, i, j, cpuid_i;
+ uint32_t unused;
+ struct kvm_cpuid_entry2 *c;
+#ifdef KVM_CPUID_SIGNATURE
+ uint32_t signature[3];
+#endif
+
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+
+ env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
+
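+ /* preserve CPUID_EXT_HYPERVISOR across the supported-feature filtering below */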
+ i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
+ env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
+ env->cpuid_ext_features |= i;
+
+ env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
+ 0, R_EDX);
+ env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
+ 0, R_ECX);
+ env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
+ 0, R_EDX);
+
cpuid_i = 0;
- cpu_x86_cpuid(env, 0, &eax, &ebx, &ecx, &edx);
- limit = eax;
+#ifdef CONFIG_KVM_PARA
+ /* Paravirtualization CPUIDs */
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = KVM_CPUID_SIGNATURE;
+ c->eax = 0;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+
+ c = &cpuid_data.entries[cpuid_i++];
+ memset(c, 0, sizeof(*c));
+ c->function = KVM_CPUID_FEATURES;
+ c->eax = env->cpuid_kvm_features & get_para_features(env);
+#endif
+
+ cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
for (i = 0; i <= limit; i++) {
- struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];
+ c = &cpuid_data.entries[cpuid_i++];
+
+ switch (i) {
+ case 2: {
+ /* CPUID leaf 2 is stateful; keep reading it until all descriptors have been returned */
+ int times;
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+ KVM_CPUID_FLAG_STATE_READ_NEXT;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax & 0xff;
+
+ for (j = 1; j < times; ++j) {
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ case 4:
+ case 0xb:
+ case 0xd:
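+ /* these leaves are indexed by ECX; enumerate sub-leaves until the leaf-specific terminator */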
+ for (j = 0; ; j++) {
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (i == 4 && c->eax == 0)
+ break;
+ if (i == 0xb && !(c->ecx & 0xff00))
+ break;
+ if (i == 0xd && c->eax == 0)
+ break;
- cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
- c->function = i;
- c->eax = eax;
- c->ebx = ebx;
- c->ecx = ecx;
- c->edx = edx;
+ c = &cpuid_data.entries[cpuid_i++];
+ }
+ break;
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ break;
+ }
}
-
- cpu_x86_cpuid(env, 0x80000000, &eax, &ebx, &ecx, &edx);
- limit = eax;
+ cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
for (i = 0x80000000; i <= limit; i++) {
- struct kvm_cpuid_entry *c = &cpuid_data.entries[cpuid_i++];
+ c = &cpuid_data.entries[cpuid_i++];
- cpu_x86_cpuid(env, i, &eax, &ebx, &ecx, &edx);
c->function = i;
- c->eax = eax;
- c->ebx = ebx;
- c->ecx = ecx;
- c->edx = edx;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
cpuid_data.cpuid.nent = cpuid_i;
- return kvm_vcpu_ioctl(env, KVM_SET_CPUID, &cpuid_data);
+#ifdef KVM_CAP_MCE
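+ /* enable in-kernel MCE handling only for family >= 6 CPUs that advertise both MCE and MCA and when KVM_CAP_MCE is available */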
+ if (((env->cpuid_version >> 8)&0xF) >= 6
+ && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
+ && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
+ uint64_t mcg_cap;
+ int banks;
+
+ if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
+ perror("kvm_get_mce_cap_supported FAILED");
+ else {
+ if (banks > MCE_BANKS_DEF)
+ banks = MCE_BANKS_DEF;
+ mcg_cap &= MCE_CAP_DEF;
+ mcg_cap |= banks;
+ if (kvm_setup_mce(env, &mcg_cap))
+ perror("kvm_setup_mce FAILED");
+ else
+ env->mcg_cap = mcg_cap;
+ }
+ }
+#endif
+
+ return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
-static int kvm_has_msr_star(CPUState *env)
+void kvm_arch_reset_vcpu(CPUState *env)
+{
+ env->exception_injected = -1;
+ env->interrupt_injected = -1;
+ env->nmi_injected = 0;
+ env->nmi_pending = 0;
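+ /* with the in-kernel irqchip, only the BSP starts runnable; APs stay uninitialized until INIT/SIPI */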
+ if (kvm_irqchip_in_kernel()) {
+ env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
+ KVM_MP_STATE_UNINITIALIZED;
+ } else {
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+ }
+}
+
+int has_msr_star;
+int has_msr_hsave_pa;
+
+static void kvm_supported_msrs(CPUState *env)
{
- static int has_msr_star;
+ static int kvm_supported_msrs;
int ret;
/* first time */
- if (has_msr_star == 0) {
+ if (kvm_supported_msrs == 0) {
struct kvm_msr_list msr_list, *kvm_msr_list;
- has_msr_star = -1;
+ kvm_supported_msrs = -1;
/* Obtain MSR list from KVM. These are the MSRs that we must
* save/restore */
msr_list.nmsrs = 0;
ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
- if (ret < 0)
- return 0;
-
- kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
- msr_list.nmsrs * sizeof(msr_list.indices[0]));
- if (kvm_msr_list == NULL)
- return 0;
+ if (ret < 0 && ret != -E2BIG) {
+ return;
+ }
+ /* Old kernel modules had a bug and could write beyond the provided
+ memory. Allocate at least a safe amount of 1K. */
+ kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
+ msr_list.nmsrs *
+ sizeof(msr_list.indices[0])));
kvm_msr_list->nmsrs = msr_list.nmsrs;
ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
for (i = 0; i < kvm_msr_list->nmsrs; i++) {
if (kvm_msr_list->indices[i] == MSR_STAR) {
has_msr_star = 1;
- break;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
+ has_msr_hsave_pa = 1;
+ continue;
}
}
}
free(kvm_msr_list);
}
- if (has_msr_star == 1)
- return 1;
+ return;
+}
+
+static int kvm_has_msr_hsave_pa(CPUState *env)
+{
+ kvm_supported_msrs(env);
+ return has_msr_hsave_pa;
+}
+
+static int kvm_has_msr_star(CPUState *env)
+{
+ kvm_supported_msrs(env);
+ return has_msr_star;
+}
+
+static int kvm_init_identity_map_page(KVMState *s)
+{
+#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
+ int ret;
+ uint64_t addr = 0xfffbc000;
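+ /* the identity map page sits directly below the 3-page TSS area at 0xfffbd000 */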
+
+ if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
+ return 0;
+ }
+
+ ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
+ if (ret < 0) {
+ fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
+ return ret;
+ }
+#endif
return 0;
}
* as unavailable memory. FIXME, need to ensure the e820 map deals with
* this?
*/
- return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
+ /*
+ * Tell fw_cfg to notify the BIOS to reserve the range.
+ */
+ if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
+ perror("e820_add_entry() table is full");
+ exit(1);
+ }
+ ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return kvm_init_identity_map_page(s);
}
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
+#ifdef KVM_CAP_XSAVE
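+/* offsets into the 32-bit word array of struct kvm_xsave's region[] */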
+#define XSAVE_CWD_RIP 2
+#define XSAVE_CWD_RDP 4
+#define XSAVE_MXCSR 6
+#define XSAVE_ST_SPACE 8
+#define XSAVE_XMM_SPACE 40
+#define XSAVE_XSTATE_BV 128
+#define XSAVE_YMMH_SPACE 144
+#endif
+
+static int kvm_put_xsave(CPUState *env)
+{
+#ifdef KVM_CAP_XSAVE
+ int i, r;
+ struct kvm_xsave* xsave;
+ uint16_t cwd, swd, twd, fop;
+
+ if (!kvm_has_xsave())
+ return kvm_put_fpu(env);
+
+ xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+ memset(xsave, 0, sizeof(struct kvm_xsave));
+ cwd = swd = twd = fop = 0;
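+ /* assemble the FXSAVE-format header: FCW/FSW in region[0], abridged tag word and opcode in region[1] */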
+ swd = env->fpus & ~(7 << 11);
+ swd |= (env->fpstt & 7) << 11;
+ cwd = env->fpuc;
+ for (i = 0; i < 8; ++i)
+ twd |= (!env->fptags[i]) << i;
+ xsave->region[0] = (uint32_t)(swd << 16) + cwd;
+ xsave->region[1] = (uint32_t)(fop << 16) + twd;
+ memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
+ sizeof env->fpregs);
+ memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
+ sizeof env->xmm_regs);
+ xsave->region[XSAVE_MXCSR] = env->mxcsr;
+ *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
+ memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
+ sizeof env->ymmh_regs);
+ r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
+ qemu_free(xsave);
+ return r;
+#else
+ return kvm_put_fpu(env);
+#endif
+}
+
+static int kvm_put_xcrs(CPUState *env)
+{
+#ifdef KVM_CAP_XCRS
+ struct kvm_xcrs xcrs;
+
+ if (!kvm_has_xcrs())
+ return 0;
+
+ xcrs.nr_xcrs = 1;
+ xcrs.flags = 0;
+ xcrs.xcrs[0].xcr = 0;
+ xcrs.xcrs[0].value = env->xcr0;
+ return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
+#else
+ return 0;
+#endif
+}
+
static int kvm_put_sregs(CPUState *env)
{
struct kvm_sregs sregs;
- memcpy(sregs.interrupt_bitmap,
- env->interrupt_bitmap,
- sizeof(sregs.interrupt_bitmap));
+ memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
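+ /* re-encode the single pending injected interrupt, if any, into the sregs bitmap */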
+ if (env->interrupt_injected >= 0) {
+ sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
+ (uint64_t)1 << (env->interrupt_injected % 64);
+ }
if ((env->eflags & VM_MASK)) {
set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
sregs.cr3 = env->cr[3];
sregs.cr4 = env->cr[4];
- sregs.cr8 = cpu_get_apic_tpr(env);
- sregs.apic_base = cpu_get_apic_base(env);
+ sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
+ sregs.apic_base = cpu_get_apic_base(env->apic_state);
sregs.efer = env->efer;
entry->data = value;
}
-static int kvm_put_msrs(CPUState *env)
+static int kvm_put_msrs(CPUState *env, int level)
{
struct {
struct kvm_msrs info;
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
if (kvm_has_msr_star(env))
kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+ if (kvm_has_msr_hsave_pa(env))
+ kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
/* FIXME if lm capable */
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
+ if (level == KVM_PUT_FULL_STATE) {
+ kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
+ env->system_time_msr);
+ kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ }
+#ifdef KVM_CAP_MCE
+ if (env->mcg_cap) {
+ int i;
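+ /* each MCE bank has four MSRs (CTL, STATUS, ADDR, MISC) starting at MSR_MC0_CTL */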
+ if (level == KVM_PUT_RESET_STATE)
+ kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
+ else if (level == KVM_PUT_FULL_STATE) {
+ kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
+ kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
+ }
+ }
+#endif
+
msr_data.info.nmsrs = n;
return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
return 0;
}
+static int kvm_get_xsave(CPUState *env)
+{
+#ifdef KVM_CAP_XSAVE
+ struct kvm_xsave* xsave;
+ int ret, i;
+ uint16_t cwd, swd, twd, fop;
+
+ if (!kvm_has_xsave())
+ return kvm_get_fpu(env);
+
+ xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+ ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
+ if (ret < 0) {
+ qemu_free(xsave);
+ return ret;
+ }
+
+ cwd = (uint16_t)xsave->region[0];
+ swd = (uint16_t)(xsave->region[0] >> 16);
+ twd = (uint16_t)xsave->region[1];
+ fop = (uint16_t)(xsave->region[1] >> 16);
+ env->fpstt = (swd >> 11) & 7;
+ env->fpus = swd;
+ env->fpuc = cwd;
+ for (i = 0; i < 8; ++i)
+ env->fptags[i] = !((twd >> i) & 1);
+ env->mxcsr = xsave->region[XSAVE_MXCSR];
+ memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
+ sizeof env->fpregs);
+ memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
+ sizeof env->xmm_regs);
+ env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
+ memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
+ sizeof env->ymmh_regs);
+ qemu_free(xsave);
+ return 0;
+#else
+ return kvm_get_fpu(env);
+#endif
+}
+
+static int kvm_get_xcrs(CPUState *env)
+{
+#ifdef KVM_CAP_XCRS
+ int i, ret;
+ struct kvm_xcrs xcrs;
+
+ if (!kvm_has_xcrs())
+ return 0;
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xcrs.nr_xcrs; i++)
+ /* Only support xcr0 now */
+ if (xcrs.xcrs[0].xcr == 0) {
+ env->xcr0 = xcrs.xcrs[0].value;
+ break;
+ }
+ return 0;
+#else
+ return 0;
+#endif
+}
+
static int kvm_get_sregs(CPUState *env)
{
struct kvm_sregs sregs;
uint32_t hflags;
- int ret;
+ int bit, i, ret;
ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
if (ret < 0)
return ret;
- memcpy(env->interrupt_bitmap,
- sregs.interrupt_bitmap,
- sizeof(sregs.interrupt_bitmap));
+ /* There can only be one pending IRQ set in the bitmap at a time, so try
+ to find it and save its number instead (-1 for none). */
+ env->interrupt_injected = -1;
+ for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
+ if (sregs.interrupt_bitmap[i]) {
+ bit = ctz64(sregs.interrupt_bitmap[i]);
+ env->interrupt_injected = i * 64 + bit;
+ break;
+ }
+ }
get_seg(&env->segs[R_CS], &sregs.cs);
get_seg(&env->segs[R_DS], &sregs.ds);
env->cr[3] = sregs.cr3;
env->cr[4] = sregs.cr4;
- cpu_set_apic_base(env, sregs.apic_base);
+ cpu_set_apic_base(env->apic_state, sregs.apic_base);
env->efer = sregs.efer;
- //cpu_set_apic_tpr(env, sregs.cr8);
+ //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
msrs[n++].index = MSR_IA32_SYSENTER_EIP;
if (kvm_has_msr_star(env))
msrs[n++].index = MSR_STAR;
+ if (kvm_has_msr_hsave_pa(env))
+ msrs[n++].index = MSR_VM_HSAVE_PA;
msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
/* FIXME lm_capable_kernel */
msrs[n++].index = MSR_FMASK;
msrs[n++].index = MSR_LSTAR;
#endif
+ msrs[n++].index = MSR_KVM_SYSTEM_TIME;
+ msrs[n++].index = MSR_KVM_WALL_CLOCK;
+
+#ifdef KVM_CAP_MCE
+ if (env->mcg_cap) {
+ msrs[n++].index = MSR_MCG_STATUS;
+ msrs[n++].index = MSR_MCG_CTL;
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
+ msrs[n++].index = MSR_MC0_CTL + i;
+ }
+#endif
+
msr_data.info.nmsrs = n;
ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
if (ret < 0)
case MSR_IA32_TSC:
env->tsc = msrs[i].data;
break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = msrs[i].data;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ env->system_time_msr = msrs[i].data;
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ env->wall_clock_msr = msrs[i].data;
+ break;
+#ifdef KVM_CAP_MCE
+ case MSR_MCG_STATUS:
+ env->mcg_status = msrs[i].data;
+ break;
+ case MSR_MCG_CTL:
+ env->mcg_ctl = msrs[i].data;
+ break;
+#endif
+ default:
+#ifdef KVM_CAP_MCE
+ if (msrs[i].index >= MSR_MC0_CTL &&
+ msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
+ env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
+ }
+#endif
+ break;
}
}
return 0;
}
-int kvm_arch_put_registers(CPUState *env)
+static int kvm_put_mp_state(CPUState *env)
{
+ struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
+
+ return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
+}
+
+static int kvm_get_mp_state(CPUState *env)
+{
+ struct kvm_mp_state mp_state;
int ret;
+ ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
+ if (ret < 0) {
+ return ret;
+ }
+ env->mp_state = mp_state.mp_state;
+ return 0;
+}
+
+static int kvm_put_vcpu_events(CPUState *env, int level)
+{
+#ifdef KVM_CAP_VCPU_EVENTS
+ struct kvm_vcpu_events events;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ events.exception.injected = (env->exception_injected >= 0);
+ events.exception.nr = env->exception_injected;
+ events.exception.has_error_code = env->has_error_code;
+ events.exception.error_code = env->error_code;
+
+ events.interrupt.injected = (env->interrupt_injected >= 0);
+ events.interrupt.nr = env->interrupt_injected;
+ events.interrupt.soft = env->soft_interrupt;
+
+ events.nmi.injected = env->nmi_injected;
+ events.nmi.pending = env->nmi_pending;
+ events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+
+ events.sipi_vector = env->sipi_vector;
+
+ events.flags = 0;
+ if (level >= KVM_PUT_RESET_STATE) {
+ events.flags |=
+ KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+ }
+
+ return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
+#else
+ return 0;
+#endif
+}
+
+static int kvm_get_vcpu_events(CPUState *env)
+{
+#ifdef KVM_CAP_VCPU_EVENTS
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
+ if (ret < 0) {
+ return ret;
+ }
+ env->exception_injected =
+ events.exception.injected ? events.exception.nr : -1;
+ env->has_error_code = events.exception.has_error_code;
+ env->error_code = events.exception.error_code;
+
+ env->interrupt_injected =
+ events.interrupt.injected ? events.interrupt.nr : -1;
+ env->soft_interrupt = events.interrupt.soft;
+
+ env->nmi_injected = events.nmi.injected;
+ env->nmi_pending = events.nmi.pending;
+ if (events.nmi.masked) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+
+ env->sipi_vector = events.sipi_vector;
+#endif
+
+ return 0;
+}
+
+static int kvm_guest_debug_workarounds(CPUState *env)
+{
+ int ret = 0;
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+ unsigned long reinject_trap = 0;
+
+ if (!kvm_has_vcpu_events()) {
+ if (env->exception_injected == 1) {
+ reinject_trap = KVM_GUESTDBG_INJECT_DB;
+ } else if (env->exception_injected == 3) {
+ reinject_trap = KVM_GUESTDBG_INJECT_BP;
+ }
+ env->exception_injected = -1;
+ }
+
+ /*
+ * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+ * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+ * by updating the debug state once again if single-stepping is on.
+ * Another reason to call kvm_update_guest_debug here is a pending debug
+ * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
+ * reinject them via SET_GUEST_DEBUG.
+ */
+ if (reinject_trap ||
+ (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
+ ret = kvm_update_guest_debug(env, reinject_trap);
+ }
+#endif /* KVM_CAP_SET_GUEST_DEBUG */
+ return ret;
+}
+
+static int kvm_put_debugregs(CPUState *env)
+{
+#ifdef KVM_CAP_DEBUGREGS
+ struct kvm_debugregs dbgregs;
+ int i;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ dbgregs.db[i] = env->dr[i];
+ }
+ dbgregs.dr6 = env->dr[6];
+ dbgregs.dr7 = env->dr[7];
+ dbgregs.flags = 0;
+
+ return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
+#else
+ return 0;
+#endif
+}
+
+static int kvm_get_debugregs(CPUState *env)
+{
+#ifdef KVM_CAP_DEBUGREGS
+ struct kvm_debugregs dbgregs;
+ int i, ret;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
+ if (ret < 0) {
+ return ret;
+ }
+ for (i = 0; i < 4; i++) {
+ env->dr[i] = dbgregs.db[i];
+ }
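+ /* keep the DR4/DR5 aliases of DR6/DR7 in sync */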
+ env->dr[4] = env->dr[6] = dbgregs.dr6;
+ env->dr[5] = env->dr[7] = dbgregs.dr7;
+#endif
+
+ return 0;
+}
+
+int kvm_arch_put_registers(CPUState *env, int level)
+{
+ int ret;
+
+ assert(cpu_is_stopped(env) || qemu_cpu_self(env));
+
ret = kvm_getput_regs(env, 1);
if (ret < 0)
return ret;
- ret = kvm_put_fpu(env);
+ ret = kvm_put_xsave(env);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_put_xcrs(env);
if (ret < 0)
return ret;
if (ret < 0)
return ret;
- ret = kvm_put_msrs(env);
+ ret = kvm_put_msrs(env, level);
+ if (ret < 0)
+ return ret;
+
+ if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_mp_state(env);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = kvm_put_vcpu_events(env, level);
+ if (ret < 0)
+ return ret;
+
+ /* must be last */
+ ret = kvm_guest_debug_workarounds(env);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_put_debugregs(env);
if (ret < 0)
return ret;
{
int ret;
+ assert(cpu_is_stopped(env) || qemu_cpu_self(env));
+
ret = kvm_getput_regs(env, 0);
if (ret < 0)
return ret;
- ret = kvm_get_fpu(env);
+ ret = kvm_get_xsave(env);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_get_xcrs(env);
if (ret < 0)
return ret;
if (ret < 0)
return ret;
+ ret = kvm_get_mp_state(env);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_get_vcpu_events(env);
+ if (ret < 0)
+ return ret;
+
+ ret = kvm_get_debugregs(env);
+ if (ret < 0)
+ return ret;
+
return 0;
}
struct kvm_interrupt intr;
intr.irq = irq;
/* FIXME: errors */
- dprintf("injected interrupt %d\n", irq);
+ DPRINTF("injected interrupt %d\n", irq);
kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
}
}
else
run->request_interrupt_window = 0;
- dprintf("setting tpr\n");
- run->cr8 = cpu_get_apic_tpr(env);
+ DPRINTF("setting tpr\n");
+ run->cr8 = cpu_get_apic_tpr(env->apic_state);
return 0;
}
else
env->eflags &= ~IF_MASK;
- cpu_set_apic_tpr(env, run->cr8);
- cpu_set_apic_base(env, run->apic_base);
+ cpu_set_apic_tpr(env->apic_state, run->cr8);
+ cpu_set_apic_base(env->apic_state, run->apic_base);
return 0;
}
+int kvm_arch_process_irqchip_events(CPUState *env)
+{
+ if (env->interrupt_request & CPU_INTERRUPT_INIT) {
+ kvm_cpu_synchronize_state(env);
+ do_cpu_init(env);
+ env->exception_index = EXCP_HALTED;
+ }
+
+ if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
+ kvm_cpu_synchronize_state(env);
+ do_cpu_sipi(env);
+ }
+
+ return env->halted;
+}
+
static int kvm_handle_halt(CPUState *env)
{
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
switch (run->exit_reason) {
case KVM_EXIT_HLT:
- dprintf("handle_hlt\n");
+ DPRINTF("handle_hlt\n");
ret = kvm_handle_halt(env);
break;
}
return ret;
}
+
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+{
+ static const uint8_t int3 = 0xcc;
+
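+ /* save the original byte, then patch in the one-byte INT3 (0xcc) opcode */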
+ if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
+ cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
+ return -EINVAL;
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+{
+ uint8_t int3;
+
+ if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
+ cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
+ return -EINVAL;
+ return 0;
+}
+
+static struct {
+ target_ulong addr;
+ int len;
+ int type;
+} hw_breakpoint[4];
+
+static int nb_hw_breakpoint;
+
+static int find_hw_breakpoint(target_ulong addr, int len, int type)
+{
+ int n;
+
+ for (n = 0; n < nb_hw_breakpoint; n++)
+ if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
+ (hw_breakpoint[n].len == len || len == -1))
+ return n;
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ len = 1;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ case 4:
+ case 8:
+ if (addr & (len - 1))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ if (nb_hw_breakpoint == 4)
+ return -ENOBUFS;
+
+ if (find_hw_breakpoint(addr, len, type) >= 0)
+ return -EEXIST;
+
+ hw_breakpoint[nb_hw_breakpoint].addr = addr;
+ hw_breakpoint[nb_hw_breakpoint].len = len;
+ hw_breakpoint[nb_hw_breakpoint].type = type;
+ nb_hw_breakpoint++;
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+ if (n < 0)
+ return -ENOENT;
+
+ nb_hw_breakpoint--;
+ hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = 0;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
+{
+ int handle = 0;
+ int n;
+
+ if (arch_info->exception == 1) {
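+ /* DR6 bit 14 (BS) flags a single-step trap; bits 0-3 flag which hardware breakpoint hit */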
+ if (arch_info->dr6 & (1 << 14)) {
+ if (cpu_single_env->singlestep_enabled)
+ handle = 1;
+ } else {
+ for (n = 0; n < 4; n++)
+ if (arch_info->dr6 & (1 << n))
+ switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
+ case 0x0:
+ handle = 1;
+ break;
+ case 0x1:
+ handle = 1;
+ cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ break;
+ case 0x3:
+ handle = 1;
+ cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_ACCESS;
+ break;
+ }
+ }
+ } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
+ handle = 1;
+
+ if (!handle) {
+ cpu_synchronize_state(cpu_single_env);
+ assert(cpu_single_env->exception_injected == -1);
+
+ cpu_single_env->exception_injected = arch_info->exception;
+ cpu_single_env->has_error_code = 0;
+ }
+
+ return handle;
+}
+
+void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
+{
+ const uint8_t type_code[] = {
+ [GDB_BREAKPOINT_HW] = 0x0,
+ [GDB_WATCHPOINT_WRITE] = 0x1,
+ [GDB_WATCHPOINT_ACCESS] = 0x3
+ };
+ const uint8_t len_code[] = {
+ [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
+ };
+ int n;
+
+ if (kvm_sw_breakpoints_active(env))
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+
+ if (nb_hw_breakpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
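+ /* DR7: 0x0600 sets GE and the always-one bit 10; each breakpoint adds its global-enable bit plus type/len fields */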
+ dbg->arch.debugreg[7] = 0x0600;
+ for (n = 0; n < nb_hw_breakpoint; n++) {
+ dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
+ dbg->arch.debugreg[7] |= (2 << (n * 2)) |
+ (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
+ (len_code[hw_breakpoint[n].len] << (18 + n*4));
+ }
+ }
+ /* Legal xcr0 for loading */
+ env->xcr0 = 1;
+}
+#endif /* KVM_CAP_SET_GUEST_DEBUG */
+
+bool kvm_arch_stop_on_emulation_error(CPUState *env)
+{
+ return !(env->cr[0] & CR0_PE_MASK) ||
+ ((env->segs[R_CS].selector & 3) != 3);
+}
+
+static void hardware_memory_error(void)
+{
+ fprintf(stderr, "Hardware memory error!\n");
+ exit(1);
+}
+
+#ifdef KVM_CAP_MCE
+static void kvm_mce_broadcast_rest(CPUState *env)
+{
+ CPUState *cenv;
+ int family, model, cpuver = env->cpuid_version;
+
+ family = (cpuver >> 8) & 0xf;
+ model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0xf);
+
+ /* Broadcast MCA signal for family 06H, model 0EH (and newer) processors */
+ if ((family == 6 && model >= 14) || family > 6) {
+ for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
+ if (cenv == env) {
+ continue;
+ }
+ kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
+ MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
+ }
+ }
+}
+#endif
+
+int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
+{
+#if defined(KVM_CAP_MCE)
+ struct kvm_x86_mce mce = {
+ .bank = 9,
+ };
+ void *vaddr;
+ ram_addr_t ram_addr;
+ target_phys_addr_t paddr;
+ int r;
+
+ if ((env->mcg_cap & MCG_SER_P) && addr
+ && (code == BUS_MCEERR_AR
+ || code == BUS_MCEERR_AO)) {
+ if (code == BUS_MCEERR_AR) {
+ /* Fake an Intel architectural Data Load SRAR UCR */
+ mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
+ | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
+ | MCI_STATUS_AR | 0x134;
+ mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
+ mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
+ } else {
+ /*
+              * If there is an MCE exception being processed, ignore
+ * this SRAO MCE
+ */
+ r = kvm_mce_in_exception(env);
+ if (r == -1) {
+ fprintf(stderr, "Failed to get MCE status\n");
+ } else if (r) {
+ return 0;
+ }
+ /* Fake an Intel architectural Memory scrubbing UCR */
+ mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
+ | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
+ | 0xc0;
+ mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
+ mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
+ }
+ vaddr = (void *)addr;
+ if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
+ !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr, &paddr)) {
+ fprintf(stderr, "Hardware memory error for memory used by "
+ "QEMU itself instead of guest system!\n");
+ /* Hope we are lucky for AO MCE */
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else {
+ hardware_memory_error();
+ }
+ }
+ mce.addr = paddr;
+ r = kvm_set_mce(env, &mce);
+ if (r < 0) {
+ fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
+ abort();
+ }
+ kvm_mce_broadcast_rest(env);
+ } else
+#endif
+ {
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else if (code == BUS_MCEERR_AR) {
+ hardware_memory_error();
+ } else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int kvm_on_sigbus(int code, void *addr)
+{
+#if defined(KVM_CAP_MCE)
+ if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
+ uint64_t status;
+ void *vaddr;
+ ram_addr_t ram_addr;
+ target_phys_addr_t paddr;
+
+ /* Hope we are lucky for AO MCE */
+ vaddr = addr;
+ if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
+ !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) {
+ fprintf(stderr, "Hardware memory error for memory used by "
+ "QEMU itself instead of guest system!: %p\n", addr);
+ return 0;
+ }
+ status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
+ | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
+ | 0xc0;
+ kvm_inject_x86_mce(first_cpu, 9, status,
+ MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
+ (MCM_ADDR_PHYS << 6) | 0xc, 1);
+ kvm_mce_broadcast_rest(first_cpu);
+ } else
+#endif
+ {
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else if (code == BUS_MCEERR_AR) {
+ hardware_memory_error();
+ } else {
+ return 1;
+ }
+ }
+ return 0;
+}