KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
author     Vitaly Kuznetsov <[email protected]>
           Mon, 25 May 2020 14:41:17 +0000 (16:41 +0200)
committer  Paolo Bonzini <[email protected]>
           Mon, 1 Jun 2020 08:26:06 +0000 (04:26 -0400)
Currently, the APF mechanism relies on #PF abuse where the token is being
passed through CR2. If we switch to using interrupts to deliver page-ready
notifications we need a different way to pass the data. Extend the existing
'struct kvm_vcpu_pv_apf_data' with token information for page-ready
notifications.
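
For reference, a minimal sketch of the shared per-vCPU structure after this
change; it simply mirrors the uapi hunk below (the structure stays 64 bytes,
so 'enabled' keeps its old offset):

	struct kvm_vcpu_pv_apf_data {
		__u32 flags;	/* was 'reason' */
		__u32 token;	/* used for page-ready notifications only */
		__u8 pad[56];
		__u32 enabled;
	};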

While at it, rename 'reason' to 'flags'. This doesn't change the semantics,
as we only have reasons '1' and '2' and these can be treated as bit flags,
but KVM_PV_REASON_PAGE_READY is going away with interrupt based delivery,
making the 'reason' name misleading.
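
For context, the two values currently delivered through this field come from
the existing uapi header (only KVM_PV_REASON_PAGE_READY is visible in the
hunk below; the '1' for page-not-present is quoted here for completeness):

	#define KVM_PV_REASON_PAGE_NOT_PRESENT	1	/* guest task should wait */
	#define KVM_PV_REASON_PAGE_READY	2	/* wake the waiting task */

Because the values are 1 and 2, existing checks keep working whether the
field is read as an enumerated reason or as bit flags.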

The newly introduced apf_put_user_ready() temporarily puts both flags and
token information; this will be changed to put the token only when we switch
to interrupt based notifications.
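
The single u64 written by apf_put_user_ready() lines up with the new layout
because the two fields are adjacent and x86 is little-endian: the low 32 bits
land in 'flags' and the high 32 bits in 'token'. A hedged illustration using
kernel types (the bit layout comes from the patch, the helper itself is
hypothetical):

	/* val is what apf_put_user_ready() writes:
	 *   val = (u64)token << 32 | KVM_PV_REASON_PAGE_READY;
	 * On a little-endian guest the shared slot then reads back as:
	 */
	static void unpack_apf_ready(u64 val, u32 *flags, u32 *token)
	{
		*flags = (u32)val;		/* == KVM_PV_REASON_PAGE_READY */
		*token = (u32)(val >> 32);	/* async PF token for this page */
	}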

Signed-off-by: Vitaly Kuznetsov <[email protected]>
Message-Id: <20200525144125[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/uapi/asm/kvm_para.h
arch/x86/kernel/kvm.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 3485f8454088b52417c84b6f7fd7692566653d2c..033f6173a8574e7fe7c8cbb179177107129bc1cc 100644 (file)
@@ -770,7 +770,7 @@ struct kvm_vcpu_arch {
                u64 msr_val;
                u32 id;
                bool send_user_only;
-               u32 host_apf_reason;
+               u32 host_apf_flags;
                unsigned long nested_apf_token;
                bool delivery_as_pf_vmexit;
        } apf;
index 118e5c2379f9a574e8e4f93681f068da257b8417..57fd1966c4ea9a4063215e8d1d2f0690b7681066 100644 (file)
@@ -90,7 +90,7 @@ unsigned int kvm_arch_para_features(void);
 unsigned int kvm_arch_para_hints(void);
 void kvm_async_pf_task_wait_schedule(u32 token);
 void kvm_async_pf_task_wake(u32 token);
-u32 kvm_read_and_reset_pf_reason(void);
+u32 kvm_read_and_reset_apf_flags(void);
 void kvm_disable_steal_time(void);
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
 
@@ -131,7 +131,7 @@ static inline unsigned int kvm_arch_para_hints(void)
        return 0;
 }
 
-static inline u32 kvm_read_and_reset_pf_reason(void)
+static inline u32 kvm_read_and_reset_apf_flags(void)
 {
        return 0;
 }
index 2a8e0b6b9805a4c5f84ca4b2bf0c0e4ae74fcc02..d1cd5c0f431ae2c190e0d8c8fcdd2899f25cbcc5 100644 (file)
@@ -112,8 +112,9 @@ struct kvm_mmu_op_release_pt {
 #define KVM_PV_REASON_PAGE_READY 2
 
 struct kvm_vcpu_pv_apf_data {
-       __u32 reason;
-       __u8 pad[60];
+       __u32 flags;
+       __u32 token; /* Used for page ready notification only */
+       __u8 pad[56];
        __u32 enabled;
 };
 
index b3d9b0d7a37da15523aa473106c35792dde014c9..d6f22a3a1f7da4bd0ab1d5118577a2ae4e27cf8a 100644 (file)
@@ -218,23 +218,23 @@ again:
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 
-u32 kvm_read_and_reset_pf_reason(void)
+u32 kvm_read_and_reset_apf_flags(void)
 {
-       u32 reason = 0;
+       u32 flags = 0;
 
        if (__this_cpu_read(apf_reason.enabled)) {
-               reason = __this_cpu_read(apf_reason.reason);
-               __this_cpu_write(apf_reason.reason, 0);
+               flags = __this_cpu_read(apf_reason.flags);
+               __this_cpu_write(apf_reason.flags, 0);
        }
 
-       return reason;
+       return flags;
 }
-EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
-NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
+NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
 
 bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
-       u32 reason = kvm_read_and_reset_pf_reason();
+       u32 reason = kvm_read_and_reset_apf_flags();
 
        switch (reason) {
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
index 2e62a03410c75583e49b97e0b1fd04c0e9c27462..5de1929cfc5531f65302ab45de129fe2833c7292 100644 (file)
@@ -4164,7 +4164,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 #endif
 
        vcpu->arch.l1tf_flush_l1d = true;
-       switch (vcpu->arch.apf.host_apf_reason) {
+       switch (vcpu->arch.apf.host_apf_flags) {
        default:
                trace_kvm_page_fault(fault_address, error_code);
 
@@ -4174,13 +4174,13 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                insn_len);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
-               vcpu->arch.apf.host_apf_reason = 0;
+               vcpu->arch.apf.host_apf_flags = 0;
                local_irq_disable();
                kvm_async_pf_task_wait_schedule(fault_address);
                local_irq_enable();
                break;
        case KVM_PV_REASON_PAGE_READY:
-               vcpu->arch.apf.host_apf_reason = 0;
+               vcpu->arch.apf.host_apf_flags = 0;
                local_irq_disable();
                kvm_async_pf_task_wake(fault_address);
                local_irq_enable();
index 6b1049148c1b24ef7a2a04e51d9569012d5d0cbb..8a6db11dcb4372e30709d8272cf4b21a668eeecb 100644 (file)
@@ -921,7 +921,7 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
                if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
-                        svm->vcpu.arch.apf.host_apf_reason)
+                        svm->vcpu.arch.apf.host_apf_flags)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;
                break;
index 3871bfb40594ec3d401652402d13a69d0c19c4ef..9e333b91ff78282bfdb890ee1915b13dcfabc67a 100644 (file)
@@ -3459,7 +3459,8 @@ static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        /* if exit due to PF check for async PF */
        if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
-               svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+               svm->vcpu.arch.apf.host_apf_flags =
+                       kvm_read_and_reset_apf_flags();
 
        if (npt_enabled) {
                vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
index 106fc6fceb97a3db9b61f1850e007080ac39268a..119a2f7395d69bcad5da80cd40bbcdfaccc575da 100644 (file)
@@ -5652,7 +5652,7 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_reason || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
index 7b55dc6230a9296ea920cbe8cea2ce48b0dc0c46..5a43af0061efa18d951d745b12370f75746d9011 100644 (file)
@@ -4765,7 +4765,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
        if (is_page_fault(intr_info)) {
                cr2 = vmx_get_exit_qual(vcpu);
                /* EPT won't cause page fault directly */
-               WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+               WARN_ON_ONCE(!vcpu->arch.apf.host_apf_flags && enable_ept);
                return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
        }
 
@@ -6360,7 +6360,7 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 
        /* if exit due to PF check for async PF */
        if (is_page_fault(intr_info)) {
-               vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+               vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
        /* Handle machine checks before interrupts are enabled */
        } else if (is_machine_check(intr_info)) {
                kvm_machine_check();
index b9a5ff7b922cce8b8cc1f2c055da4ebac4dbb5fa..84aa3c1519edb884420d296ea574d9d2b0b10afd 100644 (file)
@@ -2690,7 +2690,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        }
 
        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
-                                       sizeof(u32)))
+                                       sizeof(u64)))
                return 1;
 
        vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -10420,8 +10420,17 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
        }
 }
 
-static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
 {
+       u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
+
+       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
+                                     sizeof(reason));
+}
+
+static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
+{
+       u64 val = (u64)token << 32 | KVM_PV_REASON_PAGE_READY;
 
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
                                      sizeof(val));
@@ -10466,7 +10475,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
        kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
        if (kvm_can_deliver_async_pf(vcpu) &&
-           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+           !apf_put_user_notpresent(vcpu)) {
                fault.vector = PF_VECTOR;
                fault.error_code_valid = true;
                fault.error_code = 0;
@@ -10499,7 +10508,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
        trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
        if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
-           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+           !apf_put_user_ready(vcpu, work->arch.token)) {
                        fault.vector = PF_VECTOR;
                        fault.error_code_valid = true;
                        fault.error_code = 0;