Merge tag 'v4.9-rc1' into x86/fpu, to resolve conflict
author Ingo Molnar <[email protected]>
Sun, 16 Oct 2016 11:04:34 +0000 (13:04 +0200)
committer Ingo Molnar <[email protected]>
Sun, 16 Oct 2016 11:04:34 +0000 (13:04 +0200)
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h

diff --combined arch/x86/kernel/fpu/core.c
index 30f11ab6c07e37fa53c52841062f7d6962b5ae85,47004010ad5dd42ec03e5ca075d832ef0c925e38..52f5684405c1cf2a748d7eb9837180597305de09
@@@ -12,6 -12,7 +12,7 @@@
  #include <asm/traps.h>
  
  #include <linux/hardirq.h>
+ #include <linux/pkeys.h>
  
  #define CREATE_TRACE_POINTS
  #include <asm/trace/fpu.h>
@@@ -57,9 -58,27 +58,9 @@@ static bool kernel_fpu_disabled(void
        return this_cpu_read(in_kernel_fpu);
  }
  
 -/*
 - * Were we in an interrupt that interrupted kernel mode?
 - *
 - * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
 - * pair does nothing at all: the thread must not have fpu (so
 - * that we don't try to save the FPU state), and TS must
 - * be set (so that the clts/stts pair does nothing that is
 - * visible in the interrupted kernel thread).
 - *
 - * Except for the eagerfpu case when we return true; in the likely case
 - * the thread has FPU but we are not going to set/clear TS.
 - */
  static bool interrupted_kernel_fpu_idle(void)
  {
 -      if (kernel_fpu_disabled())
 -              return false;
 -
 -      if (use_eager_fpu())
 -              return true;
 -
 -      return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
 +      return !kernel_fpu_disabled();
  }
  
  /*
@@@ -106,7 -125,8 +107,7 @@@ void __kernel_fpu_begin(void
                 */
                copy_fpregs_to_fpstate(fpu);
        } else {
 -              this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 -              __fpregs_activate_hw();
 +              __cpu_invalidate_fpregs_state();
        }
  }
  EXPORT_SYMBOL(__kernel_fpu_begin);
@@@ -117,6 -137,8 +118,6 @@@ void __kernel_fpu_end(void
  
        if (fpu->fpregs_active)
                copy_kernel_to_fpregs(&fpu->state);
 -      else
 -              __fpregs_deactivate_hw();
  
        kernel_fpu_enable();
  }
@@@ -178,7 -200,10 +179,7 @@@ void fpu__save(struct fpu *fpu
        trace_x86_fpu_before_save(fpu);
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu)) {
 -                      if (use_eager_fpu())
 -                              copy_kernel_to_fpregs(&fpu->state);
 -                      else
 -                              fpregs_deactivate(fpu);
 +                      copy_kernel_to_fpregs(&fpu->state);
                }
        }
        trace_x86_fpu_after_save(fpu);
@@@ -222,6 -247,7 +223,6 @@@ EXPORT_SYMBOL_GPL(fpstate_init)
  
  int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  {
 -      dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
  
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
 -      if (use_eager_fpu())
 -              memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 +      memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
  
        /*
         * Save current FPU registers directly into the child
                memcpy(&src_fpu->state, &dst_fpu->state,
                       fpu_kernel_xstate_size);
  
 -              if (use_eager_fpu())
 -                      copy_kernel_to_fpregs(&src_fpu->state);
 -              else
 -                      fpregs_deactivate(src_fpu);
 +              copy_kernel_to_fpregs(&src_fpu->state);
        }
        preempt_enable();
  
@@@ -336,7 -366,7 +337,7 @@@ void fpu__activate_fpstate_write(struc
  
        if (fpu->fpstate_active) {
                /* Invalidate any lazy state: */
 -              fpu->last_cpu = -1;
 +              __fpu_invalidate_fpregs_state(fpu);
        } else {
                fpstate_init(&fpu->state);
                trace_x86_fpu_init_state(fpu);
@@@ -379,7 -409,7 +380,7 @@@ void fpu__current_fpstate_write_begin(v
         * ensures we will not be lazy and skip a XRSTOR in the
         * future.
         */
 -      fpu->last_cpu = -1;
 +      __fpu_invalidate_fpregs_state(fpu);
  }
  
  /*
@@@ -429,6 -459,7 +430,6 @@@ void fpu__restore(struct fpu *fpu
        trace_x86_fpu_before_restore(fpu);
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
 -      fpu->counter++;
        trace_x86_fpu_after_restore(fpu);
        kernel_fpu_enable();
  }
@@@ -446,6 -477,7 +447,6 @@@ EXPORT_SYMBOL_GPL(fpu__restore)
  void fpu__drop(struct fpu *fpu)
  {
        preempt_disable();
 -      fpu->counter = 0;
  
        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
@@@ -474,6 -506,9 +475,9 @@@ static inline void copy_init_fpstate_to
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
        else
                copy_kernel_to_fregs(&init_fpstate.fsave);
+       if (boot_cpu_has(X86_FEATURE_OSPKE))
+               copy_init_pkru_to_fpregs();
  }
  
  /*
@@@ -486,7 -521,7 +490,7 @@@ void fpu__clear(struct fpu *fpu
  {
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
  
 -      if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
 +      if (!static_cpu_has(X86_FEATURE_FPU)) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
diff --combined arch/x86/kernel/fpu/xstate.c
index 76bc2a1a3a79e4f4dae33f1013fe9a6b303f6eeb,124aa5c593f8da7aba6643bc609a332744d10ba6..17ad31fd0a9f7439889272f43671bfe00ceb23b8
@@@ -5,6 -5,7 +5,7 @@@
   */
  #include <linux/compat.h>
  #include <linux/cpu.h>
+ #include <linux/mman.h>
  #include <linux/pkeys.h>
  
  #include <asm/fpu/api.h>
@@@ -866,9 -867,10 +867,10 @@@ const void *get_xsave_field_ptr(int xsa
        return get_xsave_addr(&fpu->state.xsave, xsave_state);
  }
  
+ #ifdef CONFIG_ARCH_HAS_PKEYS
  #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
  #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
  /*
   * This will go out and modify PKRU register to set the access
   * rights for @pkey to @init_val.
@@@ -886,6 -888,15 +888,6 @@@ int arch_set_user_pkey_access(struct ta
         */
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return -EINVAL;
 -      /*
 -       * For most XSAVE components, this would be an arduous task:
 -       * brining fpstate up to date with fpregs, updating fpstate,
 -       * then re-populating fpregs.  But, for components that are
 -       * never lazily managed, we can just access the fpregs
 -       * directly.  PKRU is never managed lazily, so we can just
 -       * manipulate it directly.  Make sure it stays that way.
 -       */
 -      WARN_ON_ONCE(!use_eager_fpu());
  
        /* Set the bits we need in PKRU:  */
        if (init_val & PKEY_DISABLE_ACCESS)
  
        return 0;
  }
+ #endif /* ! CONFIG_ARCH_HAS_PKEYS */
  
  /*
   * This is similar to user_regset_copyout(), but will not add offset to
diff --combined arch/x86/kernel/process_64.c
index 705669efb7624c19f22c1fcbc15205ca82dc79a6,b3760b3c1ca09734a4479f63f3787dac42275a24..9c3a7b04e59e4931d708713d8fe3418e7368edc3
@@@ -109,12 -109,13 +109,13 @@@ void __show_regs(struct pt_regs *regs, 
        get_debugreg(d7, 7);
  
        /* Only print out debug registers if they are in their non-default state. */
-       if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
-           (d6 == DR6_RESERVED) && (d7 == 0x400))
-               return;
-       printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
-       printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
+       if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
+           (d6 == DR6_RESERVED) && (d7 == 0x400))) {
+               printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
+                      d0, d1, d2);
+               printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
+                      d3, d6, d7);
+       }
  
        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
@@@ -264,8 -265,9 +265,8 @@@ __switch_to(struct task_struct *prev_p
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;
 -      fpu_switch_t fpu_switch;
  
 -      fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 +      switch_fpu_prepare(prev_fpu, cpu);
  
        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
                prev->gsbase = 0;
        prev->gsindex = prev_gsindex;
  
 -      switch_fpu_finish(next_fpu, fpu_switch);
 +      switch_fpu_finish(next_fpu, cpu);
  
        /*
         * Switch the PDA and FPU contexts.
diff --combined arch/x86/kernel/smpboot.c
index 5cb801acc2e5ffb09664540b6026341c0c801c75,951f093a96fe90709827a7f75430ad042c318774..5943bb7637cd18198cf5bb508adc0cf55ec4ab4a
@@@ -1111,7 -1111,7 +1111,7 @@@ int native_cpu_up(unsigned int cpu, str
                return err;
  
        /* the FPU context is blank, nobody can own it */
 -      __cpu_disable_lazy_restore(cpu);
 +      per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
  
        common_cpu_up(cpu, tidle);
  
@@@ -1407,9 -1407,21 +1407,21 @@@ __init void prefill_possible_map(void
  {
        int i, possible;
  
-       /* no processor from mptable or madt */
-       if (!num_processors)
-               num_processors = 1;
+       /* No boot processor was found in mptable or ACPI MADT */
+       if (!num_processors) {
+               int apicid = boot_cpu_physical_apicid;
+               int cpu = hard_smp_processor_id();
+               pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+               /* Make sure boot cpu is enumerated */
+               if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+                   apic->apic_id_valid(apicid))
+                       generic_processor_info(apicid, boot_cpu_apic_version);
+               if (!num_processors)
+                       num_processors = 1;
+       }
  
        i = setup_max_cpus ?: 1;
        if (setup_possible_cpus == -1) {
diff --combined arch/x86/kvm/cpuid.c
index 1cf143e2b79406649a809eadb83bad766cb956a4,afa7bbb596cd745be6494c53bd9764af503616e2..0aefb626fa8f07ebc7d1c40516a9d8ea76d0c326
@@@ -16,6 -16,7 +16,6 @@@
  #include <linux/export.h>
  #include <linux/vmalloc.h>
  #include <linux/uaccess.h>
 -#include <asm/fpu/internal.h> /* For use_eager_fpu.  Ugh! */
  #include <asm/user.h>
  #include <asm/fpu/xstate.h>
  #include "cpuid.h"
@@@ -113,7 -114,8 +113,7 @@@ int kvm_update_cpuid(struct kvm_vcpu *v
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
  
 -      if (use_eager_fpu())
 -              kvm_x86_ops->fpu_activate(vcpu);
 +      kvm_x86_ops->fpu_activate(vcpu);
  
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
@@@ -364,7 -366,8 +364,8 @@@ static inline int __do_cpuid_ent(struc
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
-               F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB);
+               F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
+               F(AVX512BW) | F(AVX512VL);
  
        /* cpuid 0xD.1.eax */
        const u32 kvm_cpuid_D_1_eax_x86_features =
diff --combined arch/x86/kvm/x86.c
index 2c7e775d72953bff8b8fc41004221028c725222f,6c633de84dd7339637e24604952fb4d2c5180562..d5700263dad2b230fe8a20db46a22ec7dc9ffc5e
@@@ -1367,7 -1367,7 +1367,7 @@@ static void kvm_track_tsc_matching(stru
  
  static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
  {
-       u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+       u64 curr_offset = vcpu->arch.tsc_offset;
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
  }
  
@@@ -1413,6 -1413,12 +1413,12 @@@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vc
  }
  EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
  
+ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ {
+       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+       vcpu->arch.tsc_offset = offset;
+ }
  void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
  {
        struct kvm *kvm = vcpu->kvm;
  
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = kvm_compute_tsc_offset(vcpu, data);
-       ns = get_kernel_ns();
+       ns = ktime_get_boot_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
  
        if (vcpu->arch.virtual_tsc_khz) {
  
        if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
                update_ia32_tsc_adjust_msr(vcpu, offset);
-       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+       kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
  
        spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
@@@ -1716,6 -1722,88 +1722,88 @@@ static void kvm_gen_update_masterclock(
  #endif
  }
  
+ static u64 __get_kvmclock_ns(struct kvm *kvm)
+ {
+       struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
+       struct kvm_arch *ka = &kvm->arch;
+       s64 ns;
+       if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
+               u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+               ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
+       } else {
+               ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+       }
+       return ns;
+ }
+ u64 get_kvmclock_ns(struct kvm *kvm)
+ {
+       unsigned long flags;
+       s64 ns;
+       local_irq_save(flags);
+       ns = __get_kvmclock_ns(kvm);
+       local_irq_restore(flags);
+       return ns;
+ }
+ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
+ {
+       struct kvm_vcpu_arch *vcpu = &v->arch;
+       struct pvclock_vcpu_time_info guest_hv_clock;
+       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+               &guest_hv_clock, sizeof(guest_hv_clock))))
+               return;
+       /* This VCPU is paused, but it's legal for a guest to read another
+        * VCPU's kvmclock, so we really have to follow the specification where
+        * it says that version is odd if data is being modified, and even after
+        * it is consistent.
+        *
+        * Version field updates must be kept separate.  This is because
+        * kvm_write_guest_cached might use a "rep movs" instruction, and
+        * writes within a string instruction are weakly ordered.  So there
+        * are three writes overall.
+        *
+        * As a small optimization, only write the version field in the first
+        * and third write.  The vcpu->pv_time cache is still valid, because the
+        * version field is the first in the struct.
+        */
+       BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+       vcpu->hv_clock.version = guest_hv_clock.version + 1;
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock.version));
+       smp_wmb();
+       /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
+       vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
+       if (vcpu->pvclock_set_guest_stopped_request) {
+               vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+               vcpu->pvclock_set_guest_stopped_request = false;
+       }
+       trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock));
+       smp_wmb();
+       vcpu->hv_clock.version++;
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock.version));
+ }
  static int kvm_guest_time_update(struct kvm_vcpu *v)
  {
        unsigned long flags, tgt_tsc_khz;
        struct kvm_arch *ka = &v->kvm->arch;
        s64 kernel_ns;
        u64 tsc_timestamp, host_tsc;
-       struct pvclock_vcpu_time_info guest_hv_clock;
        u8 pvclock_flags;
        bool use_master_clock;
  
        }
        if (!use_master_clock) {
                host_tsc = rdtsc();
-               kernel_ns = get_kernel_ns();
+               kernel_ns = ktime_get_boot_ns();
        }
  
        tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
  
        local_irq_restore(flags);
  
-       if (!vcpu->pv_time_enabled)
-               return 0;
+       /* With all the info we got, fill in the values */
  
        if (kvm_has_tsc_control)
                tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
                vcpu->hw_tsc_khz = tgt_tsc_khz;
        }
  
-       /* With all the info we got, fill in the values */
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_guest_tsc = tsc_timestamp;
  
-       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
-               &guest_hv_clock, sizeof(guest_hv_clock))))
-               return 0;
-       /* This VCPU is paused, but it's legal for a guest to read another
-        * VCPU's kvmclock, so we really have to follow the specification where
-        * it says that version is odd if data is being modified, and even after
-        * it is consistent.
-        *
-        * Version field updates must be kept separate.  This is because
-        * kvm_write_guest_cached might use a "rep movs" instruction, and
-        * writes within a string instruction are weakly ordered.  So there
-        * are three writes overall.
-        *
-        * As a small optimization, only write the version field in the first
-        * and third write.  The vcpu->pv_time cache is still valid, because the
-        * version field is the first in the struct.
-        */
-       BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
-       vcpu->hv_clock.version = guest_hv_clock.version + 1;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
-       smp_wmb();
-       /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-       pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
-       if (vcpu->pvclock_set_guest_stopped_request) {
-               pvclock_flags |= PVCLOCK_GUEST_STOPPED;
-               vcpu->pvclock_set_guest_stopped_request = false;
-       }
        /* If the host uses TSC clocksource, then it is stable */
+       pvclock_flags = 0;
        if (use_master_clock)
                pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
  
        vcpu->hv_clock.flags = pvclock_flags;
  
-       trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock));
-       smp_wmb();
-       vcpu->hv_clock.version++;
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-                               &vcpu->hv_clock,
-                               sizeof(vcpu->hv_clock.version));
+       if (vcpu->pv_time_enabled)
+               kvm_setup_pvclock_page(v);
+       if (v == kvm_get_vcpu(v->kvm, 0))
+               kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
  }
  
@@@ -2746,7 -2789,7 +2789,7 @@@ void kvm_arch_vcpu_load(struct kvm_vcp
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
-                       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+                       kvm_vcpu_write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
                if (kvm_lapic_hv_timer_in_use(vcpu) &&
@@@ -4039,7 -4082,6 +4082,6 @@@ long kvm_arch_vm_ioctl(struct file *fil
        case KVM_SET_CLOCK: {
                struct kvm_clock_data user_ns;
                u64 now_ns;
-               s64 delta;
  
                r = -EFAULT;
                if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
  
                r = 0;
                local_irq_disable();
-               now_ns = get_kernel_ns();
-               delta = user_ns.clock - now_ns;
+               now_ns = __get_kvmclock_ns(kvm);
+               kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
                local_irq_enable();
-               kvm->arch.kvmclock_offset = delta;
                kvm_gen_update_masterclock(kvm);
                break;
        }
                struct kvm_clock_data user_ns;
                u64 now_ns;
  
-               local_irq_disable();
-               now_ns = get_kernel_ns();
-               user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
-               local_irq_enable();
+               now_ns = get_kvmclock_ns(kvm);
+               user_ns.clock = now_ns;
                user_ns.flags = 0;
                memset(&user_ns.pad, 0, sizeof(user_ns.pad));
  
@@@ -6700,7 -6739,6 +6739,6 @@@ static int vcpu_enter_guest(struct kvm_
  
        kvm_put_guest_xcr0(vcpu);
  
-       /* Interrupt is enabled by handle_external_intr() */
        kvm_x86_ops->handle_external_intr(vcpu);
  
        ++vcpu->stat.exits;
@@@ -7348,13 -7386,25 +7386,13 @@@ void kvm_load_guest_fpu(struct kvm_vcp
  
  void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
  {
 -      if (!vcpu->guest_fpu_loaded) {
 -              vcpu->fpu_counter = 0;
 +      if (!vcpu->guest_fpu_loaded)
                return;
 -      }
  
        vcpu->guest_fpu_loaded = 0;
        copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
        __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
 -      /*
 -       * If using eager FPU mode, or if the guest is a frequent user
 -       * of the FPU, just leave the FPU active for next time.
 -       * Every 255 times fpu_counter rolls over to 0; a guest that uses
 -       * the FPU in bursts will revert to loading it on demand.
 -       */
 -      if (!use_eager_fpu()) {
 -              if (++vcpu->fpu_counter < 5)
 -                      kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
 -      }
        trace_kvm_fpu(0);
  }
  
@@@ -7518,7 -7568,7 +7556,7 @@@ int kvm_arch_hardware_enable(void
         * before any KVM threads can be running.  Unfortunately, we can't
         * bring the TSCs fully up to date with real time, as we aren't yet far
         * enough into CPU bringup that we know how much real time has actually
-        * elapsed; our helper function, get_kernel_ns() will be using boot
+        * elapsed; our helper function, ktime_get_boot_ns() will be using boot
         * variables that haven't been updated yet.
         *
         * So we simply find the maximum observed TSC above, then record the
@@@ -7753,6 -7803,7 +7791,7 @@@ int kvm_arch_init_vm(struct kvm *kvm, u
        mutex_init(&kvm->arch.apic_map_lock);
        spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
  
+       kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
        pvclock_update_vm_gtod_copy(kvm);
  
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
diff --combined include/linux/kvm_host.h
index 4e6905cd1e8e502d78b59b18048fe05fa0eca97d,01c0b9cc3915cf7b74054516e16fc43fe9fbcafb..cfc212d1cd60e0ae09e17fe35caf57605fda5244
@@@ -224,6 -224,7 +224,6 @@@ struct kvm_vcpu 
  
        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
 -      unsigned char fpu_counter;
        struct swait_queue_head wq;
        struct pid *pid;
        int sigset_active;
  #endif
        bool preempted;
        struct kvm_vcpu_arch arch;
+       struct dentry *debugfs_dentry;
  };
  
  static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@@ -748,6 -750,9 +749,9 @@@ int kvm_arch_vcpu_setup(struct kvm_vcp
  void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
  void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
  
+ bool kvm_arch_has_vcpu_debugfs(void);
+ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
  int kvm_arch_hardware_enable(void);
  void kvm_arch_hardware_disable(void);
  int kvm_arch_hardware_setup(void);