x86/mm/tlb: Always use lazy TLB mode
author Rik van Riel <[email protected]>
Mon, 16 Jul 2018 19:03:36 +0000 (15:03 -0400)
committer Ingo Molnar <[email protected]>
Tue, 17 Jul 2018 07:35:34 +0000 (09:35 +0200)
Now that CPUs in lazy TLB mode no longer receive TLB shootdown IPIs, except
at page table freeing time, and idle CPUs no longer get shootdown IPIs for
things like mprotect and madvise, we can always use lazy TLB mode.

Tested-by: Song Liu <[email protected]>
Signed-off-by: Rik van Riel <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
arch/x86/include/asm/tlbflush.h
arch/x86/mm/tlb.c
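
For context, the idea can be sketched in a few lines of C. The model below
is a hypothetical, simplified user-space toy, not the kernel's
implementation; the names loosely mirror the kernel's loaded_mm, is_lazy
and tlb_gen, and the flush-generation counter is an assumption standing in
for the kernel's per-mm/per-ASID flush tracking:

/*
 * Hypothetical user-space model of the bookkeeping described above --
 * NOT the kernel's actual code.  A lazy CPU keeps its old mm loaded,
 * is skipped by remote flushes, and reconciles when it switches back.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mm {
	uint64_t tlb_gen;	/* bumped on every flush request for this mm */
};

struct cpu_tlbstate {
	struct mm *loaded_mm;	/* mm whose translations may be cached */
	uint64_t seen_gen;	/* generation this CPU has flushed up to */
	bool is_lazy;		/* running a kernel thread; stale TLB is OK */
};

/* Going idle: just mark the CPU lazy, keep the old mm loaded. */
static void enter_lazy_tlb(struct cpu_tlbstate *cpu)
{
	cpu->is_lazy = true;
}

/* A flush request (e.g. mprotect): lazy CPUs get no IPI. */
static void flush_tlb_others(struct cpu_tlbstate *cpus, int ncpus,
			     struct mm *mm)
{
	mm->tlb_gen++;
	for (int i = 0; i < ncpus; i++) {
		if (cpus[i].loaded_mm != mm)
			continue;
		if (cpus[i].is_lazy) {
			printf("cpu%d: lazy, no IPI sent\n", i);
			continue;	/* flush deferred, not skipped */
		}
		cpus[i].seen_gen = mm->tlb_gen;	/* models IPI + local flush */
		printf("cpu%d: IPI, flushed to gen %" PRIu64 "\n",
		       i, mm->tlb_gen);
	}
}

/* Switching back to user space: catch up on anything deferred. */
static void exit_lazy_tlb(struct cpu_tlbstate *cpu, int id)
{
	cpu->is_lazy = false;
	if (cpu->seen_gen != cpu->loaded_mm->tlb_gen) {
		cpu->seen_gen = cpu->loaded_mm->tlb_gen; /* local flush */
		printf("cpu%d: was behind, flushed locally\n", id);
	}
}

int main(void)
{
	struct mm mm = { .tlb_gen = 1 };
	struct cpu_tlbstate cpus[2] = {
		{ .loaded_mm = &mm, .seen_gen = 1 },
		{ .loaded_mm = &mm, .seen_gen = 1 },
	};

	enter_lazy_tlb(&cpus[1]);	/* cpu1 goes idle */
	flush_tlb_others(cpus, 2, &mm);	/* only cpu0 is interrupted */
	exit_lazy_tlb(&cpus[1], 1);	/* cpu1 wakes and reconciles */
	return 0;
}

In the actual kernel, the catch-up step lives in switch_mm_irqs_off(),
which compares the CPU's per-ASID generation with the mm's tlb_gen when a
lazy CPU resumes the mm; that tracking is what makes it safe to drop the
PCID heuristic removed in the first hunk below.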

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 3aa3204b5dc0f010a182245e5d87f0a749f8bd40..511bf5fae8b82abb7ebbd7f4173f70afae9d1538 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,22 +148,6 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
-static inline bool tlb_defer_switch_to_init_mm(void)
-{
-       /*
-        * If we have PCID, then switching to init_mm is reasonably
-        * fast.  If we don't have PCID, then switching to init_mm is
-        * quite slow, so we try to defer it in the hopes that we can
-        * avoid it entirely.  The latter approach runs the risk of
-        * receiving otherwise unnecessary IPIs.
-        *
-        * This choice is just a heuristic.  The tlb code can handle this
-        * function returning true or false regardless of whether we have
-        * PCID.
-        */
-       return !static_cpu_has(X86_FEATURE_PCID);
-}
-
 struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index e4156e37aa71ba491388ce66ba16728d1e033079..493559cae2d5a325d1ac15d49f52eb6f841de2e4 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -379,20 +379,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
        if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
                return;
 
-       if (tlb_defer_switch_to_init_mm()) {
-               /*
-                * There's a significant optimization that may be possible
-                * here.  We have accurate enough TLB flush tracking that we
-                * don't need to maintain coherence of TLB per se when we're
-                * lazy.  We do, however, need to maintain coherence of
-                * paging-structure caches.  We could, in principle, leave our
-                * old mm loaded and only switch to init_mm when
-                * tlb_remove_page() happens.
-                */
-               this_cpu_write(cpu_tlbstate.is_lazy, true);
-       } else {
-               switch_mm(NULL, &init_mm, NULL);
-       }
+       this_cpu_write(cpu_tlbstate.is_lazy, true);
 }
 
 /*