Git Repo - linux.git/commitdiff
Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author	Linus Torvalds <[email protected]>
Sun, 24 Oct 2010 19:47:25 +0000 (12:47 -0700)
committer	Linus Torvalds <[email protected]>
Sun, 24 Oct 2010 19:47:25 +0000 (12:47 -0700)
* 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (321 commits)
  KVM: Drop CONFIG_DMAR dependency around kvm_iommu_map_pages
  KVM: Fix signature of kvm_iommu_map_pages stub
  KVM: MCE: Send SRAR SIGBUS directly
  KVM: MCE: Add MCG_SER_P into KVM_MCE_CAP_SUPPORTED
  KVM: fix typo in copyright notice
  KVM: Disable interrupts around get_kernel_ns()
  KVM: MMU: Avoid sign extension in mmu_alloc_direct_roots() pae root address
  KVM: MMU: move access code parsing to FNAME(walk_addr) function
  KVM: MMU: audit: check whether have unsync sps after root sync
  KVM: MMU: audit: introduce audit_printk to cleanup audit code
  KVM: MMU: audit: unregister audit tracepoints before module unloaded
  KVM: MMU: audit: fix vcpu's spte walking
  KVM: MMU: set access bit for direct mapping
  KVM: MMU: cleanup for error mask set while walk guest page table
  KVM: MMU: update 'root_hpa' out of loop in PAE shadow path
  KVM: x86 emulator: Eliminate compilation warning in x86_decode_insn()
  KVM: x86: Fix constant type in kvm_get_time_scale
  KVM: VMX: Add AX to list of registers clobbered by guest switch
  KVM guest: Move a printk that's using the clock before it's ready
  KVM: x86: TSC catchup mode
  ...

Documentation/kernel-parameters.txt
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --combined Documentation/kernel-parameters.txt
index 4cd8b86e00ead86676c9e563efc63f0946246e0e,8dc2548294826b2acd87b42e254564320e8cde9e..9533af74a12799312b721084945413cf651b072c
@@@ -43,11 -43,10 +43,11 @@@ parameter is applicable
        AVR32   AVR32 architecture is enabled.
        AX25    Appropriate AX.25 support is enabled.
        BLACKFIN Blackfin architecture is enabled.
 -      DRM     Direct Rendering Management support is enabled.
        EDD     BIOS Enhanced Disk Drive Services (EDD) is enabled
        EFI     EFI Partitioning (GPT) is enabled
        EIDE    EIDE/ATAPI support is enabled.
 +      DRM     Direct Rendering Management support is enabled.
 +      DYNAMIC_DEBUG Build in debug messages and enable them at runtime
        FB      The frame buffer device is enabled.
        GCOV    GCOV profiling is enabled.
        HW      Appropriate hardware is enabled.
@@@ -456,7 -455,7 +456,7 @@@ and is between 256 and 4096 characters
                        [ARM] imx_timer1,OSTS,netx_timer,mpu_timer2,
                                pxa_timer,timer3,32k_counter,timer0_1
                        [AVR32] avr32
 -                      [X86-32] pit,hpet,tsc,vmi-timer;
 +                      [X86-32] pit,hpet,tsc;
                                scx200_hrt on Geode; cyclone on IBM x440
                        [MIPS] MIPS
                        [PARISC] cr16
                        Format: <port#>,<type>
                        See also Documentation/input/joystick-parport.txt
  
 +      ddebug_query=   [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
 +                      time. See Documentation/dynamic-debug-howto.txt for
 +                      details.
 +
        debug           [KNL] Enable kernel debugging (events log level).
  
        debug_locks_verbose=
        kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
                        Default is 1 (enabled)
  
-       kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
+       kvm.mmu_audit=  [KVM] This is a R/W parameter which allows audit
+                       KVM MMU at runtime.
                        Default is 0 (off)
  
+       kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
+                       Default is 1 (enabled)
        kvm-amd.npt=    [KVM,AMD] Disable nested paging (virtualized MMU)
                        for all guests.
                        Default is 1 (enabled) if in 64bit or 32bit-PAE mode
  
        nojitter        [IA64] Disables jitter checking for ITC timers.
  
+       no-kvmclock     [X86,KVM] Disable paravirtualized KVM clock driver
        nolapic         [X86-32,APIC] Do not enable or use the local APIC.
  
        nolapic_timer   [X86-32,APIC] Do not use the local APIC timer.
                        Reserves a hole at the top of the kernel virtual
                        address space.
  
 +      reservelow=     [X86]
 +                      Format: nn[K]
 +                      Set the amount of memory to reserve for BIOS at
 +                      the bottom of the address space.
 +
        reset_devices   [KNL] Force drivers to reset the underlying device
                        during initialization.
  
                        in <PAGE_SIZE> units (needed only for swap files).
                        See  Documentation/power/swsusp-and-swap-files.txt
  
 +      hibernate=      [HIBERNATION]
 +              noresume        Don't check if there's a hibernation image
 +                              present during boot.
 +              nocompress      Don't compress/decompress hibernation images.
 +
        retain_initrd   [RAM] Keep initrd memory after extraction
  
        rhash_entries=  [KNL,NET]
  
        switches=       [HW,M68k]
  
 +      sysfs.deprecated=0|1 [KNL]
 +                      Enable/disable old style sysfs layout for old udev
 +                      on older distributions. When this option is enabled
 +                      very new udev will not work anymore. When this option
 +                      is disabled (or CONFIG_SYSFS_DEPRECATED not compiled)
 +                      in older udev will not work anymore.
 +                      Default depends on CONFIG_SYSFS_DEPRECATED_V2 set in
 +                      the kernel configuration.
 +
        sysrq_always_enabled
                        [KNL]
                        Ignore sysrq setting - this boot parameter will
                        disables clocksource verification at runtime.
                        Used to enable high-resolution timer mode on older
                        hardware, and in virtualized environment.
 +                      [x86] noirqtime: Do not use TSC to do irq accounting.
 +                      Used to run time disable IRQ_TIME_ACCOUNTING on any
 +                      platforms where RDTSC is slow and this accounting
 +                      can add overhead.
  
        turbografx.map[2|3]=    [HW,JOY]
                        TurboGraFX parallel port interface
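
The kernel-parameters.txt hunk above documents the new kvm.mmu_audit and kvm-amd.nested module parameters. As a rough, hypothetical sketch of how such a runtime-writable parameter is typically exposed from kernel C code (not the actual kvm or kvm-amd source in this merge), it would look roughly like:

        /* Illustrative only: a module parameter readable and writable via
         * /sys/module/<module>/parameters/<name>. */
        #include <linux/module.h>
        #include <linux/moduleparam.h>

        static int mmu_audit;   /* default 0 (off), as documented above */
        module_param(mmu_audit, int, 0644);     /* R/W at runtime */
        MODULE_PARM_DESC(mmu_audit, "Audit the KVM MMU at runtime (default: 0)");

A parameter declared this way can also be set at boot with the documented "kvm.mmu_audit=1" syntax when the module is built in, or as "mmu_audit=1" when passed to modprobe.
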
diff --combined arch/powerpc/kernel/Makefile
index 4ed076a4db2453001468eeb9bc662aa9cf7def73,be257b0aae36b8cc810dfe3b9511e4b4404956b9..36c30f31ec93ae481706f104e273ed96b85567e7
@@@ -55,9 -55,7 +55,9 @@@ obj-$(CONFIG_IBMVIO)          += vio.
  obj-$(CONFIG_IBMEBUS)           += ibmebus.o
  obj-$(CONFIG_GENERIC_TBSYNC)  += smp-tbsync.o
  obj-$(CONFIG_CRASH_DUMP)      += crash_dump.o
 +ifeq ($(CONFIG_PPC32),y)
  obj-$(CONFIG_E500)            += idle_e500.o
 +endif
  obj-$(CONFIG_6xx)             += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
  obj-$(CONFIG_TAU)             += tau_6xx.o
  obj-$(CONFIG_HIBERNATION)     += swsusp.o suspend.o
@@@ -69,7 -67,7 +69,7 @@@ endi
  obj64-$(CONFIG_HIBERNATION)   += swsusp_asm64.o
  obj-$(CONFIG_MODULES)         += module.o module_$(CONFIG_WORD_SIZE).o
  obj-$(CONFIG_44x)             += cpu_setup_44x.o
 -obj-$(CONFIG_FSL_BOOKE)               += cpu_setup_fsl_booke.o dbell.o
 +obj-$(CONFIG_PPC_FSL_BOOK3E)  += cpu_setup_fsl_booke.o dbell.o
  obj-$(CONFIG_PPC_BOOK3E_64)   += dbell.o
  
  extra-y                               := head_$(CONFIG_WORD_SIZE).o
@@@ -129,6 -127,8 +129,8 @@@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),
  obj-y                         += ppc_save_regs.o
  endif
  
+ obj-$(CONFIG_KVM_GUEST)               += kvm.o kvm_emul.o
  # Disable GCOV in odd or sensitive code
  GCOV_PROFILE_prom_init.o := n
  GCOV_PROFILE_ftrace.o := n
diff --combined arch/powerpc/kernel/asm-offsets.c
index c3e01945ad4f6f8493c37cc13b7d5298176f4a98,7f0d6fcc28a3a3b703fb60a55fd752c46bd1dbb8..bd0df2e6aa8f7370376abda55d34f6670179022e
  #ifdef CONFIG_PPC_ISERIES
  #include <asm/iseries/alpaca.h>
  #endif
- #ifdef CONFIG_KVM
+ #if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
  #include <linux/kvm_host.h>
- #ifndef CONFIG_BOOKE
- #include <asm/kvm_book3s.h>
  #endif
+ #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
+ #include <asm/kvm_book3s.h>
  #endif
  
  #ifdef CONFIG_PPC32
@@@ -61,7 -61,7 +61,7 @@@
  #endif
  #endif
  
 -#if defined(CONFIG_FSL_BOOKE)
 +#if defined(CONFIG_PPC_FSL_BOOK3E)
  #include "../mm/mmu_decl.h"
  #endif
  
@@@ -181,19 -181,17 +181,19 @@@ int main(void
               offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
        DEFINE(SLBSHADOW_STACKESID,
               offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
 +      DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
        DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
        DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
        DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
        DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
 -      DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
 +      DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
 +      DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
  #endif /* CONFIG_PPC_STD_MMU_64 */
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
        DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
 -      DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
 -      DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
 +      DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
 +      DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
        DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
        DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
        DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-       DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
        DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
        DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
        DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
        DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+       DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+       DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
  
        /* book3s */
  #ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
  #endif /* CONFIG_PPC_BOOK3S */
  #endif
+ #ifdef CONFIG_KVM_GUEST
+       DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
+                                           scratch1));
+       DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared,
+                                           scratch2));
+       DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared,
+                                           scratch3));
+       DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared,
+                                      int_pending));
+       DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+       DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared,
+                                           critical));
+       DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr));
+ #endif
  #ifdef CONFIG_44x
        DEFINE(PGD_T_LOG2, PGD_T_LOG2);
        DEFINE(PTE_T_LOG2, PTE_T_LOG2);
  #endif
 -#ifdef CONFIG_FSL_BOOKE
 +#ifdef CONFIG_PPC_FSL_BOOK3E
        DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
        DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
        DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
diff --combined arch/powerpc/kernel/exceptions-64s.S
index 39b0c48872d23b337f07c00bb0299e284a8f9ab5,1667a078b3e6c8f2ee89d955a7ed3444334f3eea..9f8b01d6466f90e61a9841daedc1e431e862eabf
@@@ -299,6 -299,12 +299,12 @@@ slb_miss_user_pseries
        b       .                               /* prevent spec. execution */
  #endif /* __DISABLED__ */
  
+ /* KVM's trampoline code needs to be close to the interrupt handlers */
+ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ #include "../kvm/book3s_rmhandlers.S"
+ #endif
        .align  7
        .globl  __end_interrupts
  __end_interrupts:
@@@ -818,12 -824,12 +824,12 @@@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISE
  
        /*
         * hash_page couldn't handle it, set soft interrupt enable back
 -       * to what it was before the trap.  Note that .raw_local_irq_restore
 +       * to what it was before the trap.  Note that .arch_local_irq_restore
         * handles any interrupts pending at this point.
         */
        ld      r3,SOFTE(r1)
        TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
 -      bl      .raw_local_irq_restore
 +      bl      .arch_local_irq_restore
        b       11f
  
  /* We have a data breakpoint exception - handle it */
diff --combined arch/powerpc/kvm/book3s_paired_singles.c
index 35a701f3ece479e5eb76849147e27c9bff22ea39,807576f148ce8401eafdfce9737976a07103a7b3..7b0ee96c1bedec56a280e90ab47f7cceeba1d692
  
  static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
  {
 -      kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr);
 +      kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
  }
  
  static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
  {
        u64 dsisr;
+       struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
  
-       vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
-       vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
-       vcpu->arch.dear = eaddr;
+       shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
+       shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
+       shared->dar = eaddr;
        /* Page Fault */
        dsisr = kvmppc_set_field(0, 33, 33, 1);
        if (is_store)
-               to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+               shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
  }
  
@@@ -204,7 -205,7 +205,7 @@@ static int kvmppc_emulate_fpr_load(stru
        /* put in registers */
        switch (ls_type) {
        case FPU_LS_SINGLE:
 -              kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 +              kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
                vcpu->arch.qpr[rs] = *((u32*)tmp);
                break;
        case FPU_LS_DOUBLE:
@@@ -230,7 -231,7 +231,7 @@@ static int kvmppc_emulate_fpr_store(str
  
        switch (ls_type) {
        case FPU_LS_SINGLE:
 -              kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr);
 +              kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
                val = *((u32*)tmp);
                len = sizeof(u32);
                break;
@@@ -296,7 -297,7 +297,7 @@@ static int kvmppc_emulate_psq_load(stru
        emulated = EMULATE_DONE;
  
        /* put in registers */
 -      kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr);
 +      kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
        vcpu->arch.qpr[rs] = tmp[1];
  
        dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
@@@ -314,7 -315,7 +315,7 @@@ static int kvmppc_emulate_psq_store(str
        u32 tmp[2];
        int len = w ? sizeof(u32) : sizeof(u64);
  
 -      kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr);
 +      kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
        tmp[1] = vcpu->arch.qpr[rs];
  
        r = kvmppc_st(vcpu, &addr, len, tmp, true);
@@@ -516,9 -517,9 +517,9 @@@ static int kvmppc_ps_three_in(struct kv
        WARN_ON(rc);
  
        /* PS0 */
 -      kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
 -      kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
 -      kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr);
 +      kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
 +      kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
 +      kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
  
        if (scalar & SCALAR_LOW)
                ps0_in2 = qpr[reg_in2];
                          ps0_in1, ps0_in2, ps0_in3, ps0_out);
  
        if (!(scalar & SCALAR_NO_PS0))
 -              kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 +              kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
  
        /* PS1 */
        ps1_in1 = qpr[reg_in1];
@@@ -566,12 -567,12 +567,12 @@@ static int kvmppc_ps_two_in(struct kvm_
        WARN_ON(rc);
  
        /* PS0 */
 -      kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr);
 +      kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
  
        if (scalar & SCALAR_LOW)
                ps0_in2 = qpr[reg_in2];
        else
 -              kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr);
 +              kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
  
        func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
  
                dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
                                  ps0_in1, ps0_in2, ps0_out);
  
 -              kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 +              kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
        }
  
        /* PS1 */
@@@ -615,13 -616,13 +616,13 @@@ static int kvmppc_ps_one_in(struct kvm_
        WARN_ON(rc);
  
        /* PS0 */
 -      kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr);
 +      kvm_cvt_df(&fpr[reg_in], &ps0_in);
        func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
  
        dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
                          ps0_in, ps0_out);
  
 -      kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr);
 +      kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
  
        /* PS1 */
        ps1_in = qpr[reg_in];
@@@ -658,7 -659,7 +659,7 @@@ int kvmppc_emulate_paired_single(struc
        if (!kvmppc_inst_is_paired_single(vcpu, inst))
                return EMULATE_FAIL;
  
-       if (!(vcpu->arch.msr & MSR_FP)) {
+       if (!(vcpu->arch.shared->msr & MSR_FP)) {
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
                return EMULATE_AGAIN;
        }
  #ifdef DEBUG
        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
                u32 f;
 -              kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 +              kvm_cvt_df(&vcpu->arch.fpr[i], &f);
                dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
                        i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
        }
                        vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
                        /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
                        kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
 -                                 &vcpu->arch.qpr[ax_rd],
 -                                 &vcpu->arch.fpscr);
 +                                 &vcpu->arch.qpr[ax_rd]);
                        break;
                case OP_4X_PS_MERGE01:
                        WARN_ON(rcomp);
                        WARN_ON(rcomp);
                        /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
                        kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
 -                                 &vcpu->arch.fpr[ax_rd],
 -                                 &vcpu->arch.fpscr);
 +                                 &vcpu->arch.fpr[ax_rd]);
                        /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
                        kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
 -                                 &vcpu->arch.qpr[ax_rd],
 -                                 &vcpu->arch.fpscr);
 +                                 &vcpu->arch.qpr[ax_rd]);
                        break;
                case OP_4X_PS_MERGE11:
                        WARN_ON(rcomp);
                        /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
                        kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
 -                                 &vcpu->arch.fpr[ax_rd],
 -                                 &vcpu->arch.fpscr);
 +                                 &vcpu->arch.fpr[ax_rd]);
                        vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
                        break;
                }
  #ifdef DEBUG
        for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
                u32 f;
 -              kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr);
 +              kvm_cvt_df(&vcpu->arch.fpr[i], &f);
                dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
        }
  #endif
diff --combined arch/powerpc/kvm/emulate.c
index b83ba581fd8edb13e4d552ec8b6c9f5c6eb73076,454869b5e91e88cf6421554d9c8ad8160d484641..c64fd2909bb2e892536bd7776fc4b683d06d2c3b
@@@ -145,7 -145,7 +145,7 @@@ int kvmppc_emulate_instruction(struct k
        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
  
 -      pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 +      pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
  
        switch (get_op(inst)) {
        case OP_TRAP:
  
                        switch (sprn) {
                        case SPRN_SRR0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+                               break;
                        case SPRN_SRR1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+                               break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;
  
                        case SPRN_SPRG0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+                               break;
                        case SPRN_SPRG1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+                               break;
                        case SPRN_SPRG2:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+                               break;
                        case SPRN_SPRG3:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+                               break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */
  
                        {
                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
 -                              pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
 +                              pr_debug("mfDEC: %x - %llx = %lx\n",
                                         vcpu->arch.dec, jd,
                                         kvmppc_get_gpr(vcpu, rt));
                                break;
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
-                               vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+                               break;
                        case SPRN_SRR1:
-                               vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+                               break;
  
                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                                break;
  
                        case SPRN_SPRG0:
-                               vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+                               break;
                        case SPRN_SPRG1:
-                               vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+                               break;
                        case SPRN_SPRG2:
-                               vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+                               break;
                        case SPRN_SPRG3:
-                               vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+                               break;
  
                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
diff --combined arch/x86/kvm/lapic.c
index 22b06f7660f4f44459ef62568659f745d2d8dc83,82118087d9e98cbcc2a4fc546b55ee4ce80b949a..413f8973a85575975a98eabf3b43d42063946e43
@@@ -5,7 -5,7 +5,7 @@@
   * Copyright (C) 2006 Qumranet, Inc.
   * Copyright (C) 2007 Novell
   * Copyright (C) 2007 Intel
-  * Copyright 2009 Red Hat, Inc. and/or its affilates.
+  * Copyright 2009 Red Hat, Inc. and/or its affiliates.
   *
   * Authors:
   *   Dor Laor <[email protected]>
@@@ -259,9 -259,10 +259,10 @@@ static inline int apic_find_highest_isr
  
  static void apic_update_ppr(struct kvm_lapic *apic)
  {
-       u32 tpr, isrv, ppr;
+       u32 tpr, isrv, ppr, old_ppr;
        int isr;
  
+       old_ppr = apic_get_reg(apic, APIC_PROCPRI);
        tpr = apic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;
        apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                   apic, ppr, isr, isrv);
  
-       apic_set_reg(apic, APIC_PROCPRI, ppr);
+       if (old_ppr != ppr) {
+               apic_set_reg(apic, APIC_PROCPRI, ppr);
+               kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+       }
  }
  
  static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
@@@ -391,6 -395,7 +395,7 @@@ static int __apic_accept_irq(struct kvm
                        break;
                }
  
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;
  
                                       "INIT on a runnable vcpu %d\n",
                                       vcpu->vcpu_id);
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                } else {
                        apic_debug("Ignoring de-assert INIT to vcpu %d\n",
                        result = 1;
                        vcpu->arch.sipi_vector = vector;
                        vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;
@@@ -475,6 -482,7 +482,7 @@@ static void apic_set_eoi(struct kvm_lap
                trigger_mode = IOAPIC_EDGE_TRIG;
        if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
                kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+       kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
  }
  
  static void apic_send_ipi(struct kvm_lapic *apic)
@@@ -1056,13 -1064,14 +1064,13 @@@ int kvm_create_lapic(struct kvm_vcpu *v
  
        vcpu->arch.apic = apic;
  
 -      apic->regs_page = alloc_page(GFP_KERNEL);
 +      apic->regs_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (apic->regs_page == NULL) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->regs = page_address(apic->regs_page);
 -      memset(apic->regs, 0, PAGE_SIZE);
        apic->vcpu = vcpu;
  
        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
@@@ -1151,6 -1160,7 +1159,7 @@@ void kvm_apic_post_state_restore(struc
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
  }
  
  void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
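
The lapic.c hunks above add kvm_make_request(KVM_REQ_EVENT, ...) wherever pending-interrupt state may have changed. As a simplified, hypothetical sketch of the request/consume pattern (not code from this merge), the producer and the vcpu run loop interact roughly like:

        /* Illustrative only: the producer sets a request bit on the vcpu,
         * and the run loop checks and clears it before guest entry. */
        kvm_make_request(KVM_REQ_EVENT, vcpu);          /* producer side */

        if (kvm_check_request(KVM_REQ_EVENT, vcpu))     /* consumer side */
                reevaluate_pending_events(vcpu);        /* hypothetical helper */
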
diff --combined arch/x86/kvm/x86.c
index 6c2ecf0a806d67040335f7c0c57c0b57fed3384d,2e090784863a48281ab9d88258540cde6a6576cf..2288ad829b327bb68d0efa65c104dcc7f2bbe7b5
@@@ -6,7 -6,7 +6,7 @@@
   * Copyright (C) 2006 Qumranet, Inc.
   * Copyright (C) 2008 Qumranet, Inc.
   * Copyright IBM Corporation, 2008
-  * Copyright 2010 Red Hat, Inc. and/or its affilates.
+  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
   *
   * Authors:
   *   Avi Kivity   <[email protected]>
@@@ -55,6 -55,8 +55,8 @@@
  #include <asm/mce.h>
  #include <asm/i387.h>
  #include <asm/xcr.h>
+ #include <asm/pvclock.h>
+ #include <asm/div64.h>
  
  #define MAX_IO_MSRS 256
  #define CR0_RESERVED_BITS                                             \
@@@ -71,7 -73,7 +73,7 @@@
  #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
  
  #define KVM_MAX_MCE_BANKS 32
- #define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
+ #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
  
  /* EFER defaults:
   * - enable syscall per default because its emulated by KVM
@@@ -282,6 -284,8 +284,8 @@@ static void kvm_multiple_exception(stru
        u32 prev_nr;
        int class1, class2;
  
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        if (!vcpu->arch.exception.pending) {
        queue:
                vcpu->arch.exception.pending = true;
@@@ -327,16 -331,28 +331,28 @@@ void kvm_requeue_exception(struct kvm_v
  }
  EXPORT_SYMBOL_GPL(kvm_requeue_exception);
  
- void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
-                          u32 error_code)
+ void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
  {
+       unsigned error_code = vcpu->arch.fault.error_code;
        ++vcpu->stat.pf_guest;
-       vcpu->arch.cr2 = addr;
+       vcpu->arch.cr2 = vcpu->arch.fault.address;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
  }
  
+ void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+ {
+       if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
+               vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+       else
+               vcpu->arch.mmu.inject_page_fault(vcpu);
+       vcpu->arch.fault.nested = false;
+ }
  void kvm_inject_nmi(struct kvm_vcpu *vcpu)
  {
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        vcpu->arch.nmi_pending = 1;
  }
  EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@@ -366,19 -382,50 +382,50 @@@ bool kvm_require_cpl(struct kvm_vcpu *v
  }
  EXPORT_SYMBOL_GPL(kvm_require_cpl);
  
+ /*
+  * This function will be used to read from the physical memory of the currently
+  * running guest. The difference to kvm_read_guest_page is that this function
+  * can read from guest physical or from the guest's guest physical memory.
+  */
+ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                           gfn_t ngfn, void *data, int offset, int len,
+                           u32 access)
+ {
+       gfn_t real_gfn;
+       gpa_t ngpa;
+       ngpa     = gfn_to_gpa(ngfn);
+       real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
+       if (real_gfn == UNMAPPED_GVA)
+               return -EFAULT;
+       real_gfn = gpa_to_gfn(real_gfn);
+       return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
+ int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+                              void *data, int offset, int len, u32 access)
+ {
+       return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
+                                      data, offset, len, access);
+ }
  /*
   * Load the pae pdptrs.  Return true is they are all valid.
   */
- int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
+ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
  {
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
  
-       ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
-                                 offset * sizeof(u64), sizeof(pdpte));
+       ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
+                                     offset * sizeof(u64), sizeof(pdpte),
+                                     PFERR_USER_MASK|PFERR_WRITE_MASK);
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        ret = 1;
  
-       memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
+       memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
@@@ -405,8 -452,10 +452,10 @@@ EXPORT_SYMBOL_GPL(load_pdptrs)
  
  static bool pdptrs_changed(struct kvm_vcpu *vcpu)
  {
-       u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
+       u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
        bool changed = true;
+       int offset;
+       gfn_t gfn;
        int r;
  
        if (is_long_mode(vcpu) || !is_pae(vcpu))
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;
  
-       r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
+       gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
+       offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+       r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
+                                      PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
                goto out;
-       changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
+       changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
  out:
  
        return changed;
@@@ -458,7 -510,8 +510,8 @@@ int kvm_set_cr0(struct kvm_vcpu *vcpu, 
                                return 1;
                } else
  #endif
-               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
+               if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                                vcpu->arch.cr3))
                        return 1;
        }
  
@@@ -547,7 -600,7 +600,7 @@@ int kvm_set_cr4(struct kvm_vcpu *vcpu, 
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
                return 1;
  
        if (cr4 & X86_CR4_VMXE)
@@@ -580,7 -633,8 +633,8 @@@ int kvm_set_cr3(struct kvm_vcpu *vcpu, 
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS)
                                return 1;
-                       if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
+                       if (is_paging(vcpu) &&
+                           !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                                return 1;
                }
                /*
@@@ -737,7 -791,7 +791,7 @@@ static u32 msrs_to_save[] = 
  #ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
  #endif
-       MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
+       MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
  };
  
  static unsigned num_msrs_to_save;
@@@ -838,7 -892,7 +892,7 @@@ static void kvm_write_wall_clock(struc
  
        /*
         * The guest calculates current wall clock time by adding
-        * system time (updated by kvm_write_guest_time below) to the
+        * system time (updated by kvm_guest_time_update below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
@@@ -866,65 -920,229 +920,229 @@@ static uint32_t div_frac(uint32_t divid
        return quotient;
  }
  
- static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+                              s8 *pshift, u32 *pmultiplier)
  {
-       uint64_t nsecs = 1000000000LL;
+       uint64_t scaled64;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;
  
-       tps64 = tsc_khz * 1000LL;
-       while (tps64 > nsecs*2) {
+       tps64 = base_khz * 1000LL;
+       scaled64 = scaled_khz * 1000LL;
+       while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
                tps64 >>= 1;
                shift--;
        }
  
        tps32 = (uint32_t)tps64;
-       while (tps32 <= (uint32_t)nsecs) {
-               tps32 <<= 1;
+       while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
+               if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
+                       scaled64 >>= 1;
+               else
+                       tps32 <<= 1;
                shift++;
        }
  
-       hv_clock->tsc_shift = shift;
-       hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+       *pshift = shift;
+       *pmultiplier = div_frac(scaled64, tps32);
  
-       pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
-                __func__, tsc_khz, hv_clock->tsc_shift,
-                hv_clock->tsc_to_system_mul);
+       pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
+                __func__, base_khz, scaled_khz, shift, *pmultiplier);
+ }
+ static inline u64 get_kernel_ns(void)
+ {
+       struct timespec ts;
+       WARN_ON(preemptible());
+       ktime_get_ts(&ts);
+       monotonic_to_bootbased(&ts);
+       return timespec_to_ns(&ts);
  }
  
  static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
+ unsigned long max_tsc_khz;
  
- static void kvm_write_guest_time(struct kvm_vcpu *v)
+ static inline int kvm_tsc_changes_freq(void)
+ {
+       int cpu = get_cpu();
+       int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+                 cpufreq_quick_get(cpu) != 0;
+       put_cpu();
+       return ret;
+ }
+ static inline u64 nsec_to_cycles(u64 nsec)
+ {
+       u64 ret;
+       WARN_ON(preemptible());
+       if (kvm_tsc_changes_freq())
+               printk_once(KERN_WARNING
+                "kvm: unreliable cycle conversion on adjustable rate TSC\n");
+       ret = nsec * __get_cpu_var(cpu_tsc_khz);
+       do_div(ret, USEC_PER_SEC);
+       return ret;
+ }
+ static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
+ {
+       /* Compute a scale to convert nanoseconds in TSC cycles */
+       kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
+                          &kvm->arch.virtual_tsc_shift,
+                          &kvm->arch.virtual_tsc_mult);
+       kvm->arch.virtual_tsc_khz = this_tsc_khz;
+ }
+ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
+ {
+       u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
+                                     vcpu->kvm->arch.virtual_tsc_mult,
+                                     vcpu->kvm->arch.virtual_tsc_shift);
+       tsc += vcpu->arch.last_tsc_write;
+       return tsc;
+ }
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       u64 offset, ns, elapsed;
+       unsigned long flags;
+       s64 sdiff;
+       spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+       offset = data - native_read_tsc();
+       ns = get_kernel_ns();
+       elapsed = ns - kvm->arch.last_tsc_nsec;
+       sdiff = data - kvm->arch.last_tsc_write;
+       if (sdiff < 0)
+               sdiff = -sdiff;
+       /*
+        * Special case: close write to TSC within 5 seconds of
+        * another CPU is interpreted as an attempt to synchronize
+        * The 5 seconds is to accomodate host load / swapping as
+        * well as any reset of TSC during the boot process.
+        *
+        * In that case, for a reliable TSC, we can match TSC offsets,
+        * or make a best guest using elapsed value.
+        */
+       if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+           elapsed < 5ULL * NSEC_PER_SEC) {
+               if (!check_tsc_unstable()) {
+                       offset = kvm->arch.last_tsc_offset;
+                       pr_debug("kvm: matched tsc offset for %llu\n", data);
+               } else {
+                       u64 delta = nsec_to_cycles(elapsed);
+                       offset += delta;
+                       pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
+               }
+               ns = kvm->arch.last_tsc_nsec;
+       }
+       kvm->arch.last_tsc_nsec = ns;
+       kvm->arch.last_tsc_write = data;
+       kvm->arch.last_tsc_offset = offset;
+       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+       spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+       /* Reset of TSC must disable overshoot protection below */
+       vcpu->arch.hv_clock.tsc_timestamp = 0;
+       vcpu->arch.last_tsc_write = data;
+       vcpu->arch.last_tsc_nsec = ns;
+ }
+ EXPORT_SYMBOL_GPL(kvm_write_tsc);
+ static int kvm_guest_time_update(struct kvm_vcpu *v)
  {
-       struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;
+       s64 kernel_ns, max_kernel_ns;
+       u64 tsc_timestamp;
  
-       if ((!vcpu->time_page))
-               return;
+       /* Keep irq disabled to prevent changes to the clock */
+       local_irq_save(flags);
+       kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
+       kernel_ns = get_kernel_ns();
+       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
  
-       this_tsc_khz = get_cpu_var(cpu_tsc_khz);
-       if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
-               kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
-               vcpu->hv_clock_tsc_khz = this_tsc_khz;
+       if (unlikely(this_tsc_khz == 0)) {
+               local_irq_restore(flags);
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+               return 1;
+       }
+       /*
+        * We may have to catch up the TSC to match elapsed wall clock
+        * time for two reasons, even if kvmclock is used.
+        *   1) CPU could have been running below the maximum TSC rate
+        *   2) Broken TSC compensation resets the base at each VCPU
+        *      entry to avoid unknown leaps of TSC even when running
+        *      again on the same CPU.  This may cause apparent elapsed
+        *      time to disappear, and the guest to stand still or run
+        *      very slowly.
+        */
+       if (vcpu->tsc_catchup) {
+               u64 tsc = compute_guest_tsc(v, kernel_ns);
+               if (tsc > tsc_timestamp) {
+                       kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+                       tsc_timestamp = tsc;
+               }
        }
-       put_cpu_var(cpu_tsc_khz);
  
-       /* Keep irq disabled to prevent changes to the clock */
-       local_irq_save(flags);
-       kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
-       ktime_get_ts(&ts);
-       monotonic_to_bootbased(&ts);
        local_irq_restore(flags);
  
-       /* With all the info we got, fill in the values */
+       if (!vcpu->time_page)
+               return 0;
  
-       vcpu->hv_clock.system_time = ts.tv_nsec +
-                                    (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;
+       /*
+        * Time as measured by the TSC may go backwards when resetting the base
+        * tsc_timestamp.  The reason for this is that the TSC resolution is
+        * higher than the resolution of the other clock scales.  Thus, many
+        * possible measurments of the TSC correspond to one measurement of any
+        * other clock, and so a spread of values is possible.  This is not a
+        * problem for the computation of the nanosecond clock; with TSC rates
+        * around 1GHZ, there can only be a few cycles which correspond to one
+        * nanosecond value, and any path through this code will inevitably
+        * take longer than that.  However, with the kernel_ns value itself,
+        * the precision may be much lower, down to HZ granularity.  If the
+        * first sampling of TSC against kernel_ns ends in the low part of the
+        * range, and the second in the high end of the range, we can get:
+        *
+        * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
+        *
+        * As the sampling errors potentially range in the thousands of cycles,
+        * it is possible such a time value has already been observed by the
+        * guest.  To protect against this, we must compute the system time as
+        * observed by the guest and ensure the new system time is greater.
+        */
+       max_kernel_ns = 0;
+       if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
+               max_kernel_ns = vcpu->last_guest_tsc -
+                               vcpu->hv_clock.tsc_timestamp;
+               max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
+                                   vcpu->hv_clock.tsc_to_system_mul,
+                                   vcpu->hv_clock.tsc_shift);
+               max_kernel_ns += vcpu->last_kernel_ns;
+       }
  
+       if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+                                  &vcpu->hv_clock.tsc_shift,
+                                  &vcpu->hv_clock.tsc_to_system_mul);
+               vcpu->hw_tsc_khz = this_tsc_khz;
+       }
+       if (max_kernel_ns > kernel_ns)
+               kernel_ns = max_kernel_ns;
+       /* With all the info we got, fill in the values */
+       vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
+       vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+       vcpu->last_kernel_ns = kernel_ns;
+       vcpu->last_guest_tsc = tsc_timestamp;
        vcpu->hv_clock.flags = 0;
  
        /*
        kunmap_atomic(shared_kaddr, KM_USER0);
  
        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
- }
- static int kvm_request_guest_time_update(struct kvm_vcpu *v)
- {
-       struct kvm_vcpu_arch *vcpu = &v->arch;
-       if (!vcpu->time_page)
-               return 0;
-       kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
-       return 1;
+       return 0;
  }
  
  static bool msr_mtrr_valid(unsigned msr)
@@@ -1277,6 -1486,7 +1486,7 @@@ int kvm_set_msr_common(struct kvm_vcpu 
                }
  
                vcpu->arch.time = data;
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
  
                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }
-               kvm_request_guest_time_update(vcpu);
                break;
        }
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
+       case MSR_K7_CLK_CTL:
+               /*
+                * Ignore all writes to this no longer documented MSR.
+                * Writes are only relevant for old K7 processors,
+                * all pre-dating SVM, but a recommended workaround from
+                * AMD for these chips. It is possible to speicify the
+                * affected processor models on the command line, hence
+                * the need to ignore the workaround.
+                */
+               break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
                if (kvm_hv_msr_partition_wide(msr)) {
                        int r;
@@@ -1522,6 -1740,20 +1740,20 @@@ int kvm_get_msr_common(struct kvm_vcpu 
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
+               /*
+                * MSR_EBC_FREQUENCY_ID
+                * Conservative value valid for even the basic CPU models.
+                * Models 0,1: 000 in bits 23:21 indicating a bus speed of
+                * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
+                * and 266MHz for model 3, or 4. Set Core Clock
+                * Frequency to System Bus Frequency Ratio to 1 (bits
+                * 31:24) even though these are only valid for CPU
+                * models > 2, however guests may end up dividing or
+                * multiplying by zero otherwise.
+                */
+       case MSR_EBC_FREQUENCY_ID:
+               data = 1 << 24;
+               break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
+       case MSR_K7_CLK_CTL:
+               /*
+                * Provide expected ramp-up count for K7. All other
+                * are set to zero, indicating minimum divisors for
+                * every field.
+                *
+                * This prevents guest kernels on AMD host with CPU
+                * type 6, model 8 and higher from exploding due to
+                * the rdmsr failing.
+                */
+               data = 0x20000000;
+               break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
                if (kvm_hv_msr_partition_wide(msr)) {
                        int r;
@@@ -1808,19 -2052,28 +2052,28 @@@ void kvm_arch_vcpu_load(struct kvm_vcp
        }
  
        kvm_x86_ops->vcpu_load(vcpu, cpu);
-       if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
-               unsigned long khz = cpufreq_quick_get(cpu);
-               if (!khz)
-                       khz = tsc_khz;
-               per_cpu(cpu_tsc_khz, cpu) = khz;
+       if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
+               /* Make sure TSC doesn't go backwards */
+               s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
+                               native_read_tsc() - vcpu->arch.last_host_tsc;
+               if (tsc_delta < 0)
+                       mark_tsc_unstable("KVM discovered backwards TSC");
+               if (check_tsc_unstable()) {
+                       kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+                       vcpu->arch.tsc_catchup = 1;
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+               }
+               if (vcpu->cpu != cpu)
+                       kvm_migrate_timers(vcpu);
+               vcpu->cpu = cpu;
        }
-       kvm_request_guest_time_update(vcpu);
  }
  
  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
  {
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
+       vcpu->arch.last_host_tsc = native_read_tsc();
  }
  
  static int is_efer_nx(void)
@@@ -1991,14 -2244,13 +2244,14 @@@ static void do_cpuid_ent(struct kvm_cpu
                0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
                0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
 -              0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
 +              0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
 +              F(F16C);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
-               F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
 -              F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
 -              0 /* SKINIT */ | 0 /* WDT */;
 +              F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
 +              0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
  
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
@@@ -2204,6 -2456,7 +2457,7 @@@ static int kvm_vcpu_ioctl_interrupt(str
                return -ENXIO;
  
        kvm_queue_interrupt(vcpu, irq->irq, false);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
  
        return 0;
  }
@@@ -2357,6 -2610,8 +2611,8 @@@ static int kvm_vcpu_ioctl_x86_set_vcpu_
        if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
                vcpu->arch.sipi_vector = events->sipi_vector;
  
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
  }
  
@@@ -2760,7 -3015,7 +3016,7 @@@ static int kvm_vm_ioctl_set_nr_mmu_page
  
  static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
  {
-       return kvm->arch.n_alloc_mmu_pages;
+       return kvm->arch.n_max_mmu_pages;
  }
  
  static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
@@@ -2796,18 -3051,18 +3052,18 @@@ static int kvm_vm_ioctl_set_irqchip(str
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[0],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               raw_spin_lock(&pic_irqchip(kvm)->lock);
+               spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[1],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               raw_spin_unlock(&pic_irqchip(kvm)->lock);
+               spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@@ -3201,7 -3456,6 +3457,6 @@@ long kvm_arch_vm_ioctl(struct file *fil
                break;
        }
        case KVM_SET_CLOCK: {
-               struct timespec now;
                struct kvm_clock_data user_ns;
                u64 now_ns;
                s64 delta;
                        goto out;
  
                r = 0;
-               ktime_get_ts(&now);
-               now_ns = timespec_to_ns(&now);
+               local_irq_disable();
+               now_ns = get_kernel_ns();
                delta = user_ns.clock - now_ns;
+               local_irq_enable();
                kvm->arch.kvmclock_offset = delta;
                break;
        }
        case KVM_GET_CLOCK: {
-               struct timespec now;
                struct kvm_clock_data user_ns;
                u64 now_ns;
  
-               ktime_get_ts(&now);
-               now_ns = timespec_to_ns(&now);
+               local_irq_disable();
+               now_ns = get_kernel_ns();
                user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
+               local_irq_enable();
                user_ns.flags = 0;
  
                r = -EFAULT;
@@@ -3292,30 -3547,51 +3548,51 @@@ void kvm_get_segment(struct kvm_vcpu *v
        kvm_x86_ops->get_segment(vcpu, var, seg);
  }
  
+ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+ {
+       return gpa;
+ }
+ static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
+ {
+       gpa_t t_gpa;
+       u32 error;
+       BUG_ON(!mmu_is_nested(vcpu));
+       /* NPT walks are always user-walks */
+       access |= PFERR_USER_MASK;
+       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
+       if (t_gpa == UNMAPPED_GVA)
+               vcpu->arch.fault.nested = true;
+       return t_gpa;
+ }
  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
  }
  
   gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_FETCH_MASK;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
  }
  
  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_WRITE_MASK;
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
  }
  
  /* uses this to access any guest's mapped memory without checking CPL */
  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
  {
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
  }
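
The vcpu now distinguishes the MMU used for guest-visible walks (walk_mmu) from the one backing the host structures, and every guest-physical address a walk produces can be pushed through a translate_gpa() hook: the identity translate_gpa() without nesting, translate_nested_gpa() when an L2 guest's addresses must additionally be run through the L1 nested page tables. Conceptually (hypothetical, condensed C, not the kernel code):

    typedef unsigned long long gva_t;
    typedef unsigned long long gpa_t;

    struct mmu_ctx {
            gpa_t (*gva_to_gpa)(gva_t gva, unsigned int access);     /* guest page-table walk */
            gpa_t (*translate_gpa)(gpa_t gpa, unsigned int access);  /* identity, or nested walk */
    };

    static gpa_t walk(struct mmu_ctx *walk_mmu, gva_t gva, unsigned int access)
    {
            gpa_t l1_gpa = walk_mmu->gva_to_gpa(gva, access);
            return walk_mmu->translate_gpa(l1_gpa, access);          /* host-usable gpa */
    }
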
  
  static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
        int r = X86EMUL_CONTINUE;
  
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+               gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
+                                                           error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@@ -3381,8 -3658,9 +3659,9 @@@ static int kvm_write_guest_virt_system(
        int r = X86EMUL_CONTINUE;
  
        while (bytes) {
-               gpa_t gpa =  vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
-                                                      PFERR_WRITE_MASK, error);
+               gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+                                                            PFERR_WRITE_MASK,
+                                                            error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@@ -3624,7 -3902,7 +3903,7 @@@ static int emulator_pio_in_emulated(in
        if (vcpu->arch.pio.count)
                goto data_avail;
  
-       trace_kvm_pio(1, port, size, 1);
+       trace_kvm_pio(0, port, size, 1);
  
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 1;
@@@ -3652,7 -3930,7 +3931,7 @@@ static int emulator_pio_out_emulated(in
                              const void *val, unsigned int count,
                              struct kvm_vcpu *vcpu)
  {
-       trace_kvm_pio(0, port, size, 1);
+       trace_kvm_pio(1, port, size, 1);
  
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 0;
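
These two hunks fix swapped trace arguments: the tracepoint's first parameter is a read/write flag, 0 for port reads (IN) and 1 for port writes (OUT), and the emulator's in/out paths had the values crossed. Hypothetical wrappers that would make the convention hard to get wrong (trace_kvm_pio() is the existing tracepoint; the names below are illustrative only):

    #define KVM_PIO_READ  0     /* IN  */
    #define KVM_PIO_WRITE 1     /* OUT */

    static inline void trace_pio_in(unsigned int port, unsigned int size,
                                    unsigned int count)
    {
            trace_kvm_pio(KVM_PIO_READ, port, size, count);
    }

    static inline void trace_pio_out(unsigned int port, unsigned int size,
                                     unsigned int count)
    {
            trace_kvm_pio(KVM_PIO_WRITE, port, size, count);
    }
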
@@@ -3791,6 -4069,11 +4070,11 @@@ static void emulator_get_gdt(struct des
        kvm_x86_ops->get_gdt(vcpu, dt);
  }
  
+ static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+ {
+       kvm_x86_ops->get_idt(vcpu, dt);
+ }
  static unsigned long emulator_get_cached_segment_base(int seg,
                                                      struct kvm_vcpu *vcpu)
  {
@@@ -3884,6 -4167,7 +4168,7 @@@ static struct x86_emulate_ops emulate_o
        .set_segment_selector = emulator_set_segment_selector,
        .get_cached_segment_base = emulator_get_cached_segment_base,
        .get_gdt             = emulator_get_gdt,
+       .get_idt             = emulator_get_idt,
        .get_cr              = emulator_get_cr,
        .set_cr              = emulator_set_cr,
        .cpl                 = emulator_get_cpl,
@@@ -3919,13 -4203,64 +4204,64 @@@ static void inject_emulated_exception(s
  {
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
        if (ctxt->exception == PF_VECTOR)
-               kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
+               kvm_propagate_fault(vcpu);
        else if (ctxt->error_code_valid)
                kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
        else
                kvm_queue_exception(vcpu, ctxt->exception);
  }
  
+ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
+ {
+       struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+       int cs_db, cs_l;
+       cache_all_regs(vcpu);
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       vcpu->arch.emulate_ctxt.vcpu = vcpu;
+       vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+       vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
+       vcpu->arch.emulate_ctxt.mode =
+               (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+               (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
+               ? X86EMUL_MODE_VM86 : cs_l
+               ? X86EMUL_MODE_PROT64 : cs_db
+               ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+       memset(c, 0, sizeof(struct decode_cache));
+       memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+ }
+ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
+ {
+       struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+       int ret;
+       init_emulate_ctxt(vcpu);
+       vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
+       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
+       ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+       if (ret != X86EMUL_CONTINUE)
+               return EMULATE_FAIL;
+       vcpu->arch.emulate_ctxt.eip = c->eip;
+       memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+       kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       if (irq == NMI_VECTOR)
+               vcpu->arch.nmi_pending = false;
+       else
+               vcpu->arch.interrupt.pending = false;
+       return EMULATE_DONE;
+ }
+ EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
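
kvm_inject_realmode_interrupt() delivers an interrupt to a guest running in real mode by going through the instruction emulator (emulate_int_real()) with 16-bit operand and address sizes, typically because the virtualization hardware cannot inject events while the guest is in that mode. Delivery itself is simple: fetch the 4-byte IVT entry at vector*4, push FLAGS, CS and IP on the 16-bit stack, clear IF and TF, and branch to the handler. A toy standalone model of that sequence (not the kernel emulator):

    #include <stdint.h>

    struct rm_cpu {
            uint16_t cs, ip, ss, sp, flags;
            uint8_t *mem;                         /* flat view of low memory */
    };

    static void push16(struct rm_cpu *c, uint16_t v)
    {
            c->sp -= 2;
            uint32_t addr = ((uint32_t)c->ss << 4) + c->sp;
            c->mem[addr]     = (uint8_t)(v & 0xff);
            c->mem[addr + 1] = (uint8_t)(v >> 8);
    }

    static void rm_int(struct rm_cpu *c, uint8_t vector)
    {
            uint32_t ivt = (uint32_t)vector * 4;
            uint16_t new_ip = (uint16_t)(c->mem[ivt]     | (c->mem[ivt + 1] << 8));
            uint16_t new_cs = (uint16_t)(c->mem[ivt + 2] | (c->mem[ivt + 3] << 8));

            push16(c, c->flags);
            push16(c, c->cs);
            push16(c, c->ip);
            c->flags &= (uint16_t)~((1 << 9) | (1 << 8));   /* clear IF, TF */
            c->cs = new_cs;
            c->ip = new_ip;
    }
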
  static int handle_emulation_failure(struct kvm_vcpu *vcpu)
  {
        ++vcpu->stat.insn_emulation_fail;
@@@ -3982,24 -4317,15 +4318,15 @@@ int emulate_instruction(struct kvm_vcp
        cache_all_regs(vcpu);
  
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
-               int cs_db, cs_l;
-               kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-               vcpu->arch.emulate_ctxt.vcpu = vcpu;
-               vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
-               vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
-               vcpu->arch.emulate_ctxt.mode =
-                       (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
-                       (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-                       ? X86EMUL_MODE_VM86 : cs_l
-                       ? X86EMUL_MODE_PROT64 : cs_db
-                       ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-               memset(c, 0, sizeof(struct decode_cache));
-               memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+               init_emulate_ctxt(vcpu);
                vcpu->arch.emulate_ctxt.interruptibility = 0;
                vcpu->arch.emulate_ctxt.exception = -1;
+               vcpu->arch.emulate_ctxt.perm_ok = false;
+               r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+               if (r == X86EMUL_PROPAGATE_FAULT)
+                       goto done;
  
-               r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
                trace_kvm_emulate_insn_start(vcpu);
  
                /* Only allow emulation of specific instructions on #UD
        memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
  
  restart:
-       r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
+       r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
  
-       if (r) { /* emulation failed */
+       if (r == EMULATION_FAILED) {
                if (reexecute_instruction(vcpu, cr2))
                        return EMULATE_DONE;
  
                return handle_emulation_failure(vcpu);
        }
  
-       toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
-       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
-       memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
-       kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+ done:
        if (vcpu->arch.emulate_ctxt.exception >= 0) {
                inject_emulated_exception(vcpu);
-               return EMULATE_DONE;
-       }
-       if (vcpu->arch.pio.count) {
+               r = EMULATE_DONE;
+       } else if (vcpu->arch.pio.count) {
                if (!vcpu->arch.pio.in)
                        vcpu->arch.pio.count = 0;
-               return EMULATE_DO_MMIO;
-       }
-       if (vcpu->mmio_needed) {
+               r = EMULATE_DO_MMIO;
+       } else if (vcpu->mmio_needed) {
                if (vcpu->mmio_is_write)
                        vcpu->mmio_needed = 0;
-               return EMULATE_DO_MMIO;
-       }
-       if (vcpu->arch.emulate_ctxt.restart)
+               r = EMULATE_DO_MMIO;
+       } else if (r == EMULATION_RESTART)
                goto restart;
+       else
+               r = EMULATE_DONE;
  
-       return EMULATE_DONE;
+       toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+       kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+       return r;
  }
  EXPORT_SYMBOL_GPL(emulate_instruction);
  
@@@ -4097,9 -4421,23 +4422,23 @@@ int kvm_fast_pio_out(struct kvm_vcpu *v
  }
  EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
  
- static void bounce_off(void *info)
+ static void tsc_bad(void *info)
+ {
+       __get_cpu_var(cpu_tsc_khz) = 0;
+ }
+ static void tsc_khz_changed(void *data)
  {
-       /* nothing */
+       struct cpufreq_freqs *freq = data;
+       unsigned long khz = 0;
+       if (data)
+               khz = freq->new;
+       else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+               khz = cpufreq_quick_get(raw_smp_processor_id());
+       if (!khz)
+               khz = tsc_khz;
+       __get_cpu_var(cpu_tsc_khz) = khz;
  }
  
  static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
        struct kvm_vcpu *vcpu;
        int i, send_ipi = 0;
  
+       /*
+        * We allow guests to temporarily run on slowing clocks,
+        * provided we notify them after, or to run on accelerating
+        * clocks, provided we notify them before.  Thus time never
+        * goes backwards.
+        *
+        * However, we have a problem.  We can't atomically update
+        * the frequency of a given CPU from this function; it is
+        * merely a notifier, which can be called from any CPU.
+        * Changing the TSC frequency at arbitrary points in time
+        * requires a recomputation of local variables related to
+        * the TSC for each VCPU.  We must flag these local variables
+        * to be updated and be sure the update takes place with the
+        * new frequency before any guests proceed.
+        *
+        * Unfortunately, the combination of hotplug CPU and frequency
+        * change creates an intractable locking scenario; the order
+        * of when these callouts happen is undefined with respect to
+        * CPU hotplug, and they can race with each other.  As such,
+        * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
+        * undefined; you can actually have a CPU frequency change take
+        * place in between the computation of X and the setting of the
+        * variable.  To protect against this problem, all updates of
+        * the per_cpu tsc_khz variable are done in an interrupt
+        * protected IPI, and all callers wishing to update the value
+        * must wait for a synchronous IPI to complete (which is trivial
+        * if the caller is on the CPU already).  This establishes the
+        * necessary total order on variable updates.
+        *
+        * Note that because a guest time update may take place
+        * anytime after the setting of the VCPU's request bit, the
+        * correct TSC value must be set before the request.  However,
+        * to ensure the update actually makes it to any guest which
+        * starts running in hardware virtualization between the set
+        * and the acquisition of the spinlock, we must also ping the
+        * CPU after setting the request bit.
+        *
+        */
        if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
                return 0;
        if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
                return 0;
-       per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
+       smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
  
        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
                                continue;
-                       if (!kvm_request_guest_time_update(vcpu))
-                               continue;
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                        if (vcpu->cpu != smp_processor_id())
-                               send_ipi++;
+                               send_ipi = 1;
                }
        }
        spin_unlock(&kvm_lock);
                 * guest context is entered kvmclock will be updated,
                 * so the guest will not see stale values.
                 */
-               smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
+               smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
        }
        return 0;
  }
  
  static struct notifier_block kvmclock_cpufreq_notifier_block = {
-         .notifier_call  = kvmclock_cpufreq_notifier
+       .notifier_call  = kvmclock_cpufreq_notifier
+ };
+ static int kvmclock_cpu_notifier(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+ {
+       unsigned int cpu = (unsigned long)hcpu;
+       switch (action) {
+               case CPU_ONLINE:
+               case CPU_DOWN_FAILED:
+                       smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
+                       break;
+               case CPU_DOWN_PREPARE:
+                       smp_call_function_single(cpu, tsc_bad, NULL, 1);
+                       break;
+       }
+       return NOTIFY_OK;
+ }
+ static struct notifier_block kvmclock_cpu_notifier_block = {
+       .notifier_call  = kvmclock_cpu_notifier,
+       .priority = -INT_MAX
  };
  
  static void kvm_timer_init(void)
  {
        int cpu;
  
+       max_tsc_khz = tsc_khz;
+       register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+ #ifdef CONFIG_CPU_FREQ
+               struct cpufreq_policy policy;
+               memset(&policy, 0, sizeof(policy));
+               cpufreq_get_policy(&policy, get_cpu());
+               if (policy.cpuinfo.max_freq)
+                       max_tsc_khz = policy.cpuinfo.max_freq;
+ #endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
-               for_each_online_cpu(cpu) {
-                       unsigned long khz = cpufreq_get(cpu);
-                       if (!khz)
-                               khz = tsc_khz;
-                       per_cpu(cpu_tsc_khz, cpu) = khz;
-               }
-       } else {
-               for_each_possible_cpu(cpu)
-                       per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
        }
+       pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
  }
  
  static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
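
The comment block in kvmclock_cpufreq_notifier() above reduces to one rule: per_cpu(cpu_tsc_khz) is only ever written on the CPU that owns it, through a synchronous IPI, whether the trigger is a cpufreq transition or CPU hotplug (tsc_bad() zeroes the value for a CPU going down, tsc_khz_changed() refreshes one coming up or changing speed). A minimal sketch of that pattern with the same primitives (illustrative, not the kvm code):

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, my_tsc_khz);

    static void set_khz_local(void *data)
    {
            __get_cpu_var(my_tsc_khz) = *(unsigned long *)data;
    }

    /* Synchronous (wait=1): returns only after the target CPU has stored the
     * value, so the caller may safely raise per-vcpu clock-update requests. */
    static void set_khz_on(int cpu, unsigned long khz)
    {
            smp_call_function_single(cpu, set_khz_local, &khz, 1);
    }
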
@@@ -4269,6 -4671,7 +4672,7 @@@ void kvm_arch_exit(void
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
                                            CPUFREQ_TRANSITION_NOTIFIER);
+       unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
        kvm_x86_ops = NULL;
        kvm_mmu_module_exit();
  }
@@@ -4684,8 -5087,11 +5088,11 @@@ static int vcpu_enter_guest(struct kvm_
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
                        __kvm_migrate_timers(vcpu);
-               if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
-                       kvm_write_guest_time(vcpu);
+               if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+                       r = kvm_guest_time_update(vcpu);
+                       if (unlikely(r))
+                               goto out;
+               }
                if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                        kvm_mmu_sync_roots(vcpu);
                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
        if (unlikely(r))
                goto out;
  
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               inject_pending_event(vcpu);
+               /* enable NMI/IRQ window open exits if needed */
+               if (vcpu->arch.nmi_pending)
+                       kvm_x86_ops->enable_nmi_window(vcpu);
+               else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+                       kvm_x86_ops->enable_irq_window(vcpu);
+               if (kvm_lapic_enabled(vcpu)) {
+                       update_cr8_intercept(vcpu);
+                       kvm_lapic_sync_to_vapic(vcpu);
+               }
+       }
        preempt_disable();
  
        kvm_x86_ops->prepare_guest_switch(vcpu);
                smp_wmb();
                local_irq_enable();
                preempt_enable();
+               kvm_x86_ops->cancel_injection(vcpu);
                r = 1;
                goto out;
        }
  
-       inject_pending_event(vcpu);
-       /* enable NMI/IRQ window open exits if needed */
-       if (vcpu->arch.nmi_pending)
-               kvm_x86_ops->enable_nmi_window(vcpu);
-       else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-               kvm_x86_ops->enable_irq_window(vcpu);
-       if (kvm_lapic_enabled(vcpu)) {
-               update_cr8_intercept(vcpu);
-               kvm_lapic_sync_to_vapic(vcpu);
-       }
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
  
        kvm_guest_enter();
        if (hw_breakpoint_active())
                hw_breakpoint_restore();
  
+       kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
        atomic_set(&vcpu->guest_mode, 0);
        smp_wmb();
        local_irq_enable();
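
Event injection (inject_pending_event() plus the NMI/IRQ-window setup) is now performed only when KVM_REQ_EVENT is pending and before preemption is disabled; if the entry has to be aborted afterwards (a signal, a reschedule, or a new request), the new cancel_injection() hook unwinds the injection so the event is re-queued rather than lost. The shape of that inject-or-roll-back step, as a condensed sketch (hypothetical names):

    #include <stdbool.h>

    struct entry_state {
            bool event_pending;      /* KVM_REQ_EVENT was set               */
            bool injected;           /* event programmed into the VMCS/VMCB */
            bool must_abort;         /* signal/resched/request arrived late */
    };

    /* true if the guest is actually entered on this iteration */
    static bool try_enter(struct entry_state *e)
    {
            if (e->event_pending) {
                    e->injected = true;          /* inject_pending_event()   */
                    e->event_pending = false;
            }
            if (e->must_abort) {
                    if (e->injected) {
                            e->injected = false;     /* ->cancel_injection() */
                            e->event_pending = true; /* re-queued, not lost  */
                    }
                    return false;                /* bail out, loop again     */
            }
            return true;
    }
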
@@@ -4899,8 -5310,7 +5311,7 @@@ int kvm_arch_vcpu_ioctl_run(struct kvm_
        if (!irqchip_in_kernel(vcpu->kvm))
                kvm_set_cr8(vcpu, kvm_run->cr8);
  
-       if (vcpu->arch.pio.count || vcpu->mmio_needed ||
-           vcpu->arch.emulate_ctxt.restart) {
+       if (vcpu->arch.pio.count || vcpu->mmio_needed) {
                if (vcpu->mmio_needed) {
                        memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                        vcpu->mmio_read_completed = 1;
@@@ -4981,6 -5391,8 +5392,8 @@@ int kvm_arch_vcpu_ioctl_set_regs(struc
  
        vcpu->arch.exception.pending = false;
  
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
  }
  
@@@ -5044,6 -5456,7 +5457,7 @@@ int kvm_arch_vcpu_ioctl_set_mpstate(str
                                    struct kvm_mp_state *mp_state)
  {
        vcpu->arch.mp_state = mp_state->mp_state;
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
  }
  
@@@ -5051,24 -5464,11 +5465,11 @@@ int kvm_task_switch(struct kvm_vcpu *vc
                    bool has_error_code, u32 error_code)
  {
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
-       int cs_db, cs_l, ret;
-       cache_all_regs(vcpu);
-       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       int ret;
  
-       vcpu->arch.emulate_ctxt.vcpu = vcpu;
-       vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
-       vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
-       vcpu->arch.emulate_ctxt.mode =
-               (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
-               (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-               ? X86EMUL_MODE_VM86 : cs_l
-               ? X86EMUL_MODE_PROT64 : cs_db
-               ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-       memset(c, 0, sizeof(struct decode_cache));
-       memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+       init_emulate_ctxt(vcpu);
  
-       ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
+       ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
                                   tss_selector, reason, has_error_code,
                                   error_code);
  
        memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
        kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
        kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return EMULATE_DONE;
  }
  EXPORT_SYMBOL_GPL(kvm_task_switch);
@@@ -5113,7 -5514,7 +5515,7 @@@ int kvm_arch_vcpu_ioctl_set_sregs(struc
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
                mmu_reset_needed = 1;
        }
  
            !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
  
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
  }
  
@@@ -5334,6 -5737,10 +5738,10 @@@ void kvm_arch_vcpu_free(struct kvm_vcp
  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                                unsigned int id)
  {
+       if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+               printk_once(KERN_WARNING
+               "kvm: SMP vm created on host with unstable TSC; "
+               "guest TSC will not be reliable\n");
        return kvm_x86_ops->vcpu_create(kvm, id);
  }
  
@@@ -5376,22 -5783,22 +5784,22 @@@ int kvm_arch_vcpu_reset(struct kvm_vcp
        vcpu->arch.dr6 = DR6_FIXED_1;
        vcpu->arch.dr7 = DR7_FIXED_1;
  
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
        return kvm_x86_ops->vcpu_reset(vcpu);
  }
  
  int kvm_arch_hardware_enable(void *garbage)
  {
-       /*
-        * Since this may be called from a hotplug notifcation,
-        * we can't get the CPU frequency directly.
-        */
-       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
-               int cpu = raw_smp_processor_id();
-               per_cpu(cpu_tsc_khz, cpu) = 0;
-       }
+       struct kvm *kvm;
+       struct kvm_vcpu *vcpu;
+       int i;
  
        kvm_shared_msr_cpu_online();
+       list_for_each_entry(kvm, &vm_list, vm_list)
+               kvm_for_each_vcpu(i, vcpu, kvm)
+                       if (vcpu->cpu == smp_processor_id())
+                               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        return kvm_x86_ops->hardware_enable(garbage);
  }
  
@@@ -5425,7 -5832,11 +5833,11 @@@ int kvm_arch_vcpu_init(struct kvm_vcpu 
        BUG_ON(vcpu->kvm == NULL);
        kvm = vcpu->kvm;
  
+       vcpu->arch.emulate_ctxt.ops = &emulate_ops;
+       vcpu->arch.walk_mmu = &vcpu->arch.mmu;
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.mmu.translate_gpa = translate_gpa;
+       vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
        if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
        }
        vcpu->arch.pio_data = page_address(page);
  
+       if (!kvm->arch.virtual_tsc_khz)
+               kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;
@@@ -5497,7 -5911,7 +5912,7 @@@ struct  kvm *kvm_arch_create_vm(void
        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
  
-       rdtscll(kvm->arch.vm_init_tsc);
+       spin_lock_init(&kvm->arch.tsc_write_lock);
  
        return kvm;
  }
@@@ -5684,6 -6098,7 +6099,7 @@@ void kvm_set_rflags(struct kvm_vcpu *vc
            kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
                rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
  }
  EXPORT_SYMBOL_GPL(kvm_set_rflags);
  
diff --combined include/linux/kvm_host.h
index ac740b26eb1071950a26ce64ddca68f8212e8742,866ed30843636b0c5218a41e84fa0442b5d0c8a3..a0557422715e596ee46451c72e8184bff4b3201d
  #define KVM_REQ_PENDING_TIMER      5
  #define KVM_REQ_UNHALT             6
  #define KVM_REQ_MMU_SYNC           7
- #define KVM_REQ_KVMCLOCK_UPDATE    8
+ #define KVM_REQ_CLOCK_UPDATE       8
  #define KVM_REQ_KICK               9
  #define KVM_REQ_DEACTIVATE_FPU    10
+ #define KVM_REQ_EVENT             11
  
  #define KVM_USERSPACE_IRQ_SOURCE_ID   0
  
@@@ -205,7 -206,7 +206,7 @@@ struct kvm 
  
        struct mutex irq_lock;
  #ifdef CONFIG_HAVE_KVM_IRQCHIP
 -      struct kvm_irq_routing_table *irq_routing;
 +      struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
  #endif
@@@ -289,6 -290,9 +290,9 @@@ void kvm_arch_commit_memory_region(stru
  void kvm_disable_largepages(void);
  void kvm_arch_flush_shadow(struct kvm *kvm);
  
+ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+                           int nr_pages);
  struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
  unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
  void kvm_release_page_clean(struct page *page);
@@@ -296,6 -300,8 +300,8 @@@ void kvm_release_page_dirty(struct pag
  void kvm_set_page_dirty(struct page *page);
  void kvm_set_page_accessed(struct page *page);
  
+ pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
+ pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
  pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
  pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
@@@ -477,8 -483,7 +483,7 @@@ int kvm_deassign_device(struct kvm *kvm
                        struct kvm_assigned_dev_kernel *assigned_dev);
  #else /* CONFIG_IOMMU_API */
  static inline int kvm_iommu_map_pages(struct kvm *kvm,
-                                     gfn_t base_gfn,
-                                     unsigned long npages)
+                                     struct kvm_memory_slot *slot)
  {
        return 0;
  }
@@@ -518,11 -523,22 +523,22 @@@ static inline void kvm_guest_exit(void
        current->flags &= ~PF_VCPU;
  }
  
+ static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
+                                              gfn_t gfn)
+ {
+       return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ }
  static inline gpa_t gfn_to_gpa(gfn_t gfn)
  {
        return (gpa_t)gfn << PAGE_SHIFT;
  }
  
+ static inline gfn_t gpa_to_gfn(gpa_t gpa)
+ {
+       return (gfn_t)(gpa >> PAGE_SHIFT);
+ }
  static inline hpa_t pfn_to_hpa(pfn_t pfn)
  {
        return (hpa_t)pfn << PAGE_SHIFT;
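
These helpers are pure page-size arithmetic: a gfn and a gpa differ by a PAGE_SHIFT shift, and a memslot maps a contiguous range of frames starting at userspace_addr. A small worked example, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef uint64_t gfn_t;
    typedef uint64_t gpa_t;

    struct memslot {
            gfn_t base_gfn;                /* first guest frame in the slot   */
            unsigned long userspace_addr;  /* where it is mapped in the host  */
    };

    int main(void)
    {
            struct memslot slot = { .base_gfn = 0x100,
                                    .userspace_addr = 0x7f0000000000UL };
            gpa_t gpa = 0x101234;                     /* a guest-physical address   */
            gfn_t gfn = gpa >> PAGE_SHIFT;            /* gpa_to_gfn()      -> 0x101 */
            unsigned long hva = slot.userspace_addr +
                    (gfn - slot.base_gfn) * PAGE_SIZE; /* gfn_to_hva_memslot()      */

            printf("gfn=%#llx hva=%#lx gpa=%#llx\n",
                   (unsigned long long)gfn, hva,
                   (unsigned long long)(gfn << PAGE_SHIFT)); /* gfn_to_gpa() */
            return 0;
    }
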
diff --combined virt/kvm/kvm_main.c
index 60e5e4612b0b329fb616564fad02067934e90652,ac326de43ce4215653391cc0901dca1791342205..5225052aebc15d28709359b7ee365f0490035203
@@@ -5,7 -5,7 +5,7 @@@
   * machines without emulation or binary translation.
   *
   * Copyright (C) 2006 Qumranet, Inc.
-  * Copyright 2010 Red Hat, Inc. and/or its affilates.
+  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
   *
   * Authors:
   *   Avi Kivity   <[email protected]>
@@@ -705,14 -705,12 +705,12 @@@ skip_lpage
        if (r)
                goto out_free;
  
- #ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        if (npages) {
                r = kvm_iommu_map_pages(kvm, &new);
                if (r)
                        goto out_free;
        }
- #endif
  
        r = -ENOMEM;
        slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
@@@ -927,35 -925,46 +925,46 @@@ int memslot_id(struct kvm *kvm, gfn_t g
        return memslot - slots->memslots;
  }
  
- static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
- {
-       return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
- }
- unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+ static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+                                    gfn_t *nr_pages)
  {
        struct kvm_memory_slot *slot;
  
        slot = gfn_to_memslot(kvm, gfn);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
                return bad_hva();
+       if (nr_pages)
+               *nr_pages = slot->npages - (gfn - slot->base_gfn);
        return gfn_to_hva_memslot(slot, gfn);
  }
+ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+ {
+       return gfn_to_hva_many(kvm, gfn, NULL);
+ }
  EXPORT_SYMBOL_GPL(gfn_to_hva);
  
- static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
+ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
  {
        struct page *page[1];
        int npages;
        pfn_t pfn;
  
-       might_sleep();
-       npages = get_user_pages_fast(addr, 1, 1, page);
+       if (atomic)
+               npages = __get_user_pages_fast(addr, 1, 1, page);
+       else {
+               might_sleep();
+               npages = get_user_pages_fast(addr, 1, 1, page);
+       }
  
        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;
  
+               if (atomic)
+                       goto return_fault_page;
                down_read(&current->mm->mmap_sem);
                if (is_hwpoison_address(addr)) {
                        up_read(&current->mm->mmap_sem);
                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
+ return_fault_page:
                        get_page(fault_page);
                        return page_to_pfn(fault_page);
                }
        return pfn;
  }
  
- pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+ pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+ {
+       return hva_to_pfn(kvm, addr, true);
+ }
+ EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
+ static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
  {
        unsigned long addr;
  
                return page_to_pfn(bad_page);
        }
  
-       return hva_to_pfn(kvm, addr);
+       return hva_to_pfn(kvm, addr, atomic);
+ }
+ pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
+ {
+       return __gfn_to_pfn(kvm, gfn, true);
+ }
+ EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
+ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+ {
+       return __gfn_to_pfn(kvm, gfn, false);
  }
  EXPORT_SYMBOL_GPL(gfn_to_pfn);
  
@@@ -999,9 -1026,26 +1026,26 @@@ pfn_t gfn_to_pfn_memslot(struct kvm *kv
                         struct kvm_memory_slot *slot, gfn_t gfn)
  {
        unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-       return hva_to_pfn(kvm, addr);
+       return hva_to_pfn(kvm, addr, false);
  }
  
+ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+                                                                 int nr_pages)
+ {
+       unsigned long addr;
+       gfn_t entry;
+       addr = gfn_to_hva_many(kvm, gfn, &entry);
+       if (kvm_is_error_hva(addr))
+               return -1;
+       if (entry < nr_pages)
+               return 0;
+       return __get_user_pages_fast(addr, nr_pages, 1, pages);
+ }
+ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
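
The new *_atomic variants take only the fast GUP path (__get_user_pages_fast()), which never sleeps, so they are usable from contexts that cannot block, e.g. while holding the mmu spin lock for spte prefetching; when the fast path cannot satisfy the request they return the fault page or a short count instead of falling back to the sleeping path. A hypothetical caller of the helper added above (fragment, not from this tree):

    /* Try to pin up to 4 consecutive guest pages without sleeping;
     * fall back to the single-page, possibly-sleeping path otherwise. */
    struct page *pages[4];
    int got = gfn_to_page_many_atomic(kvm, gfn, pages, 4);

    if (got <= 0)
            pages[0] = gfn_to_page(kvm, gfn);      /* may sleep */
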
  struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
  {
        pfn_t pfn;
@@@ -1305,7 -1349,6 +1349,7 @@@ static struct file_operations kvm_vcpu_
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
 +      .llseek         = noop_llseek,
  };
  
  /*
@@@ -1775,7 -1818,6 +1819,7 @@@ static struct file_operations kvm_vm_fo
        .compat_ioctl   = kvm_vm_compat_ioctl,
  #endif
        .mmap           = kvm_vm_mmap,
 +      .llseek         = noop_llseek,
  };
  
  static int kvm_dev_ioctl_create_vm(void)
@@@ -1869,7 -1911,6 +1913,7 @@@ out
  static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl   = kvm_dev_ioctl,
 +      .llseek         = noop_llseek,
  };
  
  static struct miscdevice kvm_dev = {
@@@ -1964,7 -2005,9 +2008,9 @@@ static int kvm_cpu_hotplug(struct notif
        case CPU_STARTING:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
+               spin_lock(&kvm_lock);
                hardware_enable(NULL);
+               spin_unlock(&kvm_lock);
                break;
        }
        return NOTIFY_OK;
@@@ -1977,7 -2020,7 +2023,7 @@@ asmlinkage void kvm_handle_fault_on_reb
                /* spin while reset goes on */
                local_irq_enable();
                while (true)
-                       ;
+                       cpu_relax();
        }
        /* Fault while not rebooting.  We want the trace. */
        BUG();
@@@ -2171,8 -2214,10 +2217,10 @@@ static int kvm_suspend(struct sys_devic
  
  static int kvm_resume(struct sys_device *dev)
  {
-       if (kvm_usage_count)
+       if (kvm_usage_count) {
+               WARN_ON(spin_is_locked(&kvm_lock));
                hardware_enable(NULL);
+       }
        return 0;
  }
  