Git Repo - linux.git/commitdiff
Revert "KVM: x86: use the fast way to invalidate all pages"
author: Sean Christopherson <[email protected]>
Tue, 5 Feb 2019 21:01:31 +0000 (13:01 -0800)
committer: Paolo Bonzini <[email protected]>
Wed, 20 Feb 2019 21:48:45 +0000 (22:48 +0100)
Revert to a slow kvm_mmu_zap_all() for kvm_arch_flush_shadow_all().
Flushing all shadow entries is only done during VM teardown, i.e.
kvm_arch_flush_shadow_all() is only called when the associated MM struct
is being released or when the VM instance is being freed.

Although the performance of teardown itself isn't critical, KVM should
still voluntarily schedule to play nice with the rest of the kernel;
but that can be done without the fast invalidate mechanism in a future
patch.

This reverts commit 6ca18b6950f8dee29361722f28f69847724b276f.

Cc: Xiao Guangrong <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

index df4025e6f1e17444421c3dd26441c86aa23174e7..5cdeb88850f8f24a98867f3ce5abab00b8c37d1d 100644 (file)
@@ -5846,6 +5846,21 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
+void kvm_mmu_zap_all(struct kvm *kvm)
+{
+       struct kvm_mmu_page *sp, *node;
+       LIST_HEAD(invalid_list);
+
+       spin_lock(&kvm->mmu_lock);
+restart:
+       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
+               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
+                       goto restart;
+
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
index 78fb13f190a344da3216fb85646ca83d9f5fb891..65e4559eef2fc8589e0a4277077e766ceead3994 100644 (file)
@@ -9470,7 +9470,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-       kvm_mmu_invalidate_zap_all_pages(kvm);
+       kvm_mmu_zap_all(kvm);
 }
 
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
This page took 0.062156 seconds and 4 git commands to generate.