arm64: optimize flush tlb kernel range
author Kefeng Wang <[email protected]>
Mon, 23 Sep 2024 13:13:51 +0000 (21:13 +0800)
committer Catalin Marinas <[email protected]>
Wed, 16 Oct 2024 11:01:53 +0000 (12:01 +0100)
Currently the kernel TLB is flushed page by page if the target
VA range is smaller than MAX_DVM_OPS * PAGE_SIZE; otherwise we
brutally issue a TLBI ALL.

But we can do better when the CPU supports TLB range operations:
convert flush_tlb_kernel_range() to use __flush_tlb_range_op(),
like the other TLB range flush paths, to improve performance.
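
For a sense of scale, here is a standalone userspace sketch (not kernel
code) of the arithmetic involved: the old loop issues one broadcast TLBI
per page, while a range-capable CPU needs only a handful of range TLBIs
for the same span. PAGE_SHIFT and the per-instruction coverage constant
MAX_RANGE_PAGES below are illustrative assumptions, loosely based on the
FEAT_TLBIRANGE encoding; the real limits are handled inside
__flush_tlb_range_op().

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_RANGE_PAGES		(32UL << 16)	/* assumed max pages per range TLBI */

static unsigned long round_down_to(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* hypothetical kernel VA range of roughly 2MB */
	unsigned long start = 0xffff800080001234UL;
	unsigned long end = start + (2UL << 20);
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages, range_ops;

	/* same alignment and page count the new flush_tlb_kernel_range() does */
	start = round_down_to(start, stride);
	end = round_up_to(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/* old path: one TLBI VAALE1IS broadcast per page */
	printf("per-page TLBIs: %lu\n", pages);

	/* new path with FEAT_TLBIRANGE: lower bound on range TLBIs */
	range_ops = (pages + MAX_RANGE_PAGES - 1) / MAX_RANGE_PAGES;
	printf("range TLBIs (lower bound): %lu\n", range_ops);

	return 0;
}

The real __flush_tlb_range_op() needs a couple more instructions than this
lower bound because of the scale/num encoding granularity, but the reduction
in broadcast DVM operations is of the same order.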

Co-developed-by: Yicong Yang <[email protected]>
Signed-off-by: Yicong Yang <[email protected]>
Signed-off-by: Kefeng Wang <[email protected]>
Reviewed-by: Anshuman Khandual <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Catalin Marinas <[email protected]>
arch/arm64/include/asm/tlbflush.h

index 5f5e7d1f2e7d036dd1a91b9ffcbd0f13021eda93..bc94e036a26b99b14d8acd767f61320362555dd5 100644 (file)
@@ -501,19 +501,21 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-       unsigned long addr;
+       const unsigned long stride = PAGE_SIZE;
+       unsigned long pages;
 
-       if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
+       start = round_down(start, stride);
+       end = round_up(end, stride);
+       pages = (end - start) >> PAGE_SHIFT;
+
+       if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
                flush_tlb_all();
                return;
        }
 
-       start = __TLBI_VADDR(start, 0);
-       end = __TLBI_VADDR(end, 0);
-
        dsb(ishst);
-       for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-               __tlbi(vaale1is, addr);
+       __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
+                            TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
        dsb(ish);
        isb();
 }
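
As a rough, simplified model (deliberately not the kernel macro), the sketch
below shows why switching to a range-op helper such as __flush_tlb_range_op()
is safe on older hardware: when the CPU lacks FEAT_TLBIRANGE it degrades to
the same per-page invalidations as before, and only range-capable CPUs see
fewer broadcasts. The helper names, MAX_RANGE_PAGES and the printf stand-ins
for the TLBI instructions are all illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_RANGE_PAGES	(32UL << 16)	/* assumed per-instruction coverage */

static void tlbi_page(unsigned long addr)
{
	printf("TLBI  VAALE1IS, page at %#lx\n", addr);
}

static void tlbi_range(unsigned long addr, unsigned long pages)
{
	printf("TLBI RVAALE1IS, %lu pages at %#lx\n", pages, addr);
}

/* simplified model of a range-op helper, not the kernel implementation */
static void flush_range_model(unsigned long start, unsigned long pages,
			      bool has_tlbirange)
{
	while (pages) {
		if (!has_tlbirange || pages == 1) {
			/* fall back to per-page invalidation */
			tlbi_page(start);
			start += 1UL << PAGE_SHIFT;
			pages--;
			continue;
		}

		/* invalidate as large a chunk as one range TLBI allows */
		unsigned long chunk = pages < MAX_RANGE_PAGES ? pages : MAX_RANGE_PAGES;

		tlbi_range(start, chunk);
		start += chunk << PAGE_SHIFT;
		pages -= chunk;
	}
}

int main(void)
{
	flush_range_model(0xffff800080000000UL, 4, false);	/* pre-FEAT_TLBIRANGE CPU */
	flush_range_model(0xffff800080000000UL, 4, true);	/* range-capable CPU */
	return 0;
}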