// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
78 bool is_vmalloc_addr(const void *x)
80 unsigned long addr = (unsigned long)kasan_reset_tag(x);
82 return addr >= VMALLOC_START && addr < VMALLOC_END;
84 EXPORT_SYMBOL(is_vmalloc_addr);
86 struct vfree_deferred {
87 struct llist_head list;
88 struct work_struct wq;
90 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
92 /*** Page table manipulation functions ***/
93 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
94 phys_addr_t phys_addr, pgprot_t prot,
95 unsigned int max_page_shift, pgtbl_mod_mask *mask)
99 unsigned long size = PAGE_SIZE;
101 pfn = phys_addr >> PAGE_SHIFT;
102 pte = pte_alloc_kernel_track(pmd, addr, mask);
106 BUG_ON(!pte_none(ptep_get(pte)));
108 #ifdef CONFIG_HUGETLB_PAGE
109 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
110 if (size != PAGE_SIZE) {
111 pte_t entry = pfn_pte(pfn, prot);
113 entry = arch_make_huge_pte(entry, ilog2(size), 0);
114 set_huge_pte_at(&init_mm, addr, pte, entry, size);
115 pfn += PFN_DOWN(size);
119 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
121 } while (pte += PFN_DOWN(size), addr += size, addr != end);
122 *mask |= PGTBL_PTE_MODIFIED;
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
127 phys_addr_t phys_addr, pgprot_t prot,
128 unsigned int max_page_shift)
130 if (max_page_shift < PMD_SHIFT)
133 if (!arch_vmap_pmd_supported(prot))
136 if ((end - addr) != PMD_SIZE)
139 if (!IS_ALIGNED(addr, PMD_SIZE))
142 if (!IS_ALIGNED(phys_addr, PMD_SIZE))
145 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
148 return pmd_set_huge(pmd, phys_addr, prot);
151 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
152 phys_addr_t phys_addr, pgprot_t prot,
153 unsigned int max_page_shift, pgtbl_mod_mask *mask)
158 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
162 next = pmd_addr_end(addr, end);
164 if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
166 *mask |= PGTBL_PMD_MODIFIED;
170 if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
172 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
176 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
177 phys_addr_t phys_addr, pgprot_t prot,
178 unsigned int max_page_shift)
180 if (max_page_shift < PUD_SHIFT)
183 if (!arch_vmap_pud_supported(prot))
186 if ((end - addr) != PUD_SIZE)
189 if (!IS_ALIGNED(addr, PUD_SIZE))
192 if (!IS_ALIGNED(phys_addr, PUD_SIZE))
195 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
198 return pud_set_huge(pud, phys_addr, prot);
201 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
202 phys_addr_t phys_addr, pgprot_t prot,
203 unsigned int max_page_shift, pgtbl_mod_mask *mask)
208 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
212 next = pud_addr_end(addr, end);
214 if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
216 *mask |= PGTBL_PUD_MODIFIED;
220 if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
221 max_page_shift, mask))
223 } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
227 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
228 phys_addr_t phys_addr, pgprot_t prot,
229 unsigned int max_page_shift)
231 if (max_page_shift < P4D_SHIFT)
234 if (!arch_vmap_p4d_supported(prot))
237 if ((end - addr) != P4D_SIZE)
240 if (!IS_ALIGNED(addr, P4D_SIZE))
243 if (!IS_ALIGNED(phys_addr, P4D_SIZE))
246 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
249 return p4d_set_huge(p4d, phys_addr, prot);
252 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
253 phys_addr_t phys_addr, pgprot_t prot,
254 unsigned int max_page_shift, pgtbl_mod_mask *mask)
259 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
263 next = p4d_addr_end(addr, end);
265 if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
267 *mask |= PGTBL_P4D_MODIFIED;
271 if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
272 max_page_shift, mask))
274 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
278 static int vmap_range_noflush(unsigned long addr, unsigned long end,
279 phys_addr_t phys_addr, pgprot_t prot,
280 unsigned int max_page_shift)
286 pgtbl_mod_mask mask = 0;
292 pgd = pgd_offset_k(addr);
294 next = pgd_addr_end(addr, end);
295 err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
296 max_page_shift, &mask);
299 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
301 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
302 arch_sync_kernel_mappings(start, end);
307 int vmap_page_range(unsigned long addr, unsigned long end,
308 phys_addr_t phys_addr, pgprot_t prot)
312 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
313 ioremap_max_page_shift);
314 flush_cache_vmap(addr, end);
316 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
317 ioremap_max_page_shift);
321 int ioremap_page_range(unsigned long addr, unsigned long end,
322 phys_addr_t phys_addr, pgprot_t prot)
324 struct vm_struct *area;
326 area = find_vm_area((void *)addr);
327 if (!area || !(area->flags & VM_IOREMAP)) {
328 WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
331 if (addr != (unsigned long)area->addr ||
332 (void *)end != area->addr + get_vm_area_size(area)) {
333 WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
334 addr, end, (long)area->addr,
335 (long)area->addr + get_vm_area_size(area));
338 return vmap_page_range(addr, end, phys_addr, prot);
341 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
342 pgtbl_mod_mask *mask)
346 pte = pte_offset_kernel(pmd, addr);
348 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
349 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
350 } while (pte++, addr += PAGE_SIZE, addr != end);
351 *mask |= PGTBL_PTE_MODIFIED;
354 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
355 pgtbl_mod_mask *mask)
361 pmd = pmd_offset(pud, addr);
363 next = pmd_addr_end(addr, end);
365 cleared = pmd_clear_huge(pmd);
366 if (cleared || pmd_bad(*pmd))
367 *mask |= PGTBL_PMD_MODIFIED;
371 if (pmd_none_or_clear_bad(pmd))
373 vunmap_pte_range(pmd, addr, next, mask);
376 } while (pmd++, addr = next, addr != end);
379 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
380 pgtbl_mod_mask *mask)
386 pud = pud_offset(p4d, addr);
388 next = pud_addr_end(addr, end);
390 cleared = pud_clear_huge(pud);
391 if (cleared || pud_bad(*pud))
392 *mask |= PGTBL_PUD_MODIFIED;
396 if (pud_none_or_clear_bad(pud))
398 vunmap_pmd_range(pud, addr, next, mask);
399 } while (pud++, addr = next, addr != end);
402 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
403 pgtbl_mod_mask *mask)
408 p4d = p4d_offset(pgd, addr);
410 next = p4d_addr_end(addr, end);
414 *mask |= PGTBL_P4D_MODIFIED;
416 if (p4d_none_or_clear_bad(p4d))
418 vunmap_pud_range(p4d, addr, next, mask);
419 } while (p4d++, addr = next, addr != end);
/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
434 void __vunmap_range_noflush(unsigned long start, unsigned long end)
438 unsigned long addr = start;
439 pgtbl_mod_mask mask = 0;
442 pgd = pgd_offset_k(addr);
444 next = pgd_addr_end(addr, end);
446 mask |= PGTBL_PGD_MODIFIED;
447 if (pgd_none_or_clear_bad(pgd))
449 vunmap_p4d_range(pgd, addr, next, &mask);
450 } while (pgd++, addr = next, addr != end);
452 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
453 arch_sync_kernel_mappings(start, end);
456 void vunmap_range_noflush(unsigned long start, unsigned long end)
458 kmsan_vunmap_range_noflush(start, end);
459 __vunmap_range_noflush(start, end);
/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
471 void vunmap_range(unsigned long addr, unsigned long end)
473 flush_cache_vunmap(addr, end);
474 vunmap_range_noflush(addr, end);
475 flush_tlb_kernel_range(addr, end);
478 static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
480 pgtbl_mod_mask *mask)
	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
489 pte = pte_alloc_kernel_track(pmd, addr, mask);
493 struct page *page = pages[*nr];
495 if (WARN_ON(!pte_none(ptep_get(pte))))
499 if (WARN_ON(!pfn_valid(page_to_pfn(page))))
502 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
504 } while (pte++, addr += PAGE_SIZE, addr != end);
505 *mask |= PGTBL_PTE_MODIFIED;
509 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
510 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
511 pgtbl_mod_mask *mask)
516 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
520 next = pmd_addr_end(addr, end);
521 if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
523 } while (pmd++, addr = next, addr != end);
527 static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
528 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
529 pgtbl_mod_mask *mask)
534 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
538 next = pud_addr_end(addr, end);
539 if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
541 } while (pud++, addr = next, addr != end);
545 static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
546 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
547 pgtbl_mod_mask *mask)
552 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
556 next = p4d_addr_end(addr, end);
557 if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
559 } while (p4d++, addr = next, addr != end);
563 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
564 pgprot_t prot, struct page **pages)
566 unsigned long start = addr;
571 pgtbl_mod_mask mask = 0;
574 pgd = pgd_offset_k(addr);
576 next = pgd_addr_end(addr, end);
578 mask |= PGTBL_PGD_MODIFIED;
579 err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
582 } while (pgd++, addr = next, addr != end);
584 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
585 arch_sync_kernel_mappings(start, end);
/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
599 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
600 pgprot_t prot, struct page **pages, unsigned int page_shift)
602 unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
604 WARN_ON(page_shift < PAGE_SHIFT);
606 if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
607 page_shift == PAGE_SHIFT)
608 return vmap_small_pages_range_noflush(addr, end, prot, pages);
610 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
613 err = vmap_range_noflush(addr, addr + (1UL << page_shift),
614 page_to_phys(pages[i]), prot,
619 addr += 1UL << page_shift;
625 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
626 pgprot_t prot, struct page **pages, unsigned int page_shift)
628 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
633 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
648 static int vmap_pages_range(unsigned long addr, unsigned long end,
649 pgprot_t prot, struct page **pages, unsigned int page_shift)
653 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
654 flush_cache_vmap(addr, end);
658 static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
662 if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
664 if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
666 if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
668 if ((end - start) >> PAGE_SHIFT > totalram_pages())
670 if (start < (unsigned long)area->addr ||
671 (void *)end > area->addr + get_vm_area_size(area))
/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
683 int vm_area_map_pages(struct vm_struct *area, unsigned long start,
684 unsigned long end, struct page **pages)
688 err = check_sparse_vm_area(area, start, end);
692 return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
701 void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
704 if (check_sparse_vm_area(area, start, end))
707 vunmap_range(start, end);
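/*
 * Illustrative usage sketch (not from this file; assumes a sparse area
 * created elsewhere with the VM_SPARSE flag, e.g. via get_vm_area()):
 *
 *	area = get_vm_area(SZ_4M, VM_SPARSE);
 *	err = vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
 *	...
 *	vm_area_unmap_pages(area, start, start + PAGE_SIZE);
 *
 * Both helpers reject areas without VM_SPARSE and ranges outside the
 * area, see check_sparse_vm_area() above.
 */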
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
717 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
718 unsigned long addr = (unsigned long)kasan_reset_tag(x);
719 if (addr >= MODULES_VADDR && addr < MODULES_END)
722 return is_vmalloc_addr(x);
724 EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
731 struct page *vmalloc_to_page(const void *vmalloc_addr)
733 unsigned long addr = (unsigned long) vmalloc_addr;
734 struct page *page = NULL;
735 pgd_t *pgd = pgd_offset_k(addr);
	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
749 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
750 return NULL; /* XXX: no allowance for huge pgd */
751 if (WARN_ON_ONCE(pgd_bad(*pgd)))
754 p4d = p4d_offset(pgd, addr);
758 return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
759 if (WARN_ON_ONCE(p4d_bad(*p4d)))
762 pud = pud_offset(p4d, addr);
766 return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
767 if (WARN_ON_ONCE(pud_bad(*pud)))
770 pmd = pmd_offset(pud, addr);
774 return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
775 if (WARN_ON_ONCE(pmd_bad(*pmd)))
778 ptep = pte_offset_kernel(pmd, addr);
779 pte = ptep_get(ptep);
780 if (pte_present(pte))
781 page = pte_page(pte);
785 EXPORT_SYMBOL(vmalloc_to_page);
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
790 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
792 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
794 EXPORT_SYMBOL(vmalloc_to_pfn);
797 /*** Global kva allocator ***/
799 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
800 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
803 static DEFINE_SPINLOCK(free_vmap_area_lock);
804 static bool vmap_initialized __read_mostly;
/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * the free block.
 */
812 static struct kmem_cache *vmap_area_cachep;
/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
818 static LIST_HEAD(free_vmap_area_list);
/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and for merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
830 static struct rb_root free_vmap_area_root = RB_ROOT;
/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, and thus
 * to use more permissive allocation masks.
 */
837 static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
/*
 * This structure defines a single, solid model where a list and
 * an rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};
/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of a certain size, linked together. An index
 * in the pool array corresponds to the VA size in pages minus one,
 * i.e. pool[0] holds 1-page VAs.
 */
#define MAX_VA_SIZE_PAGES	256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};
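/*
 * For illustration (assuming 4 KiB pages): a 3-page (12 KiB) VA maps to
 * pool index (3 * 4096 - 1) / 4096 == 2, i.e. pool[2]; VAs larger than
 * MAX_VA_SIZE_PAGES pages (1 MB here) are not cached in a per-node pool
 * at all, see size_to_va_pool().
 */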
/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. It allows balancing access and mitigating
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;
/*
 * Initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
891 static struct vmap_node *vmap_nodes = &single;
892 static __read_mostly unsigned int nr_vmap_nodes = 1;
893 static __read_mostly unsigned int vmap_zone_size = 1;
895 static inline unsigned int
896 addr_to_node_id(unsigned long addr)
898 return (addr / vmap_zone_size) % nr_vmap_nodes;
901 static inline struct vmap_node *
902 addr_to_node(unsigned long addr)
904 return &vmap_nodes[addr_to_node_id(addr)];
907 static inline struct vmap_node *
908 id_to_node(unsigned int id)
910 return &vmap_nodes[id % nr_vmap_nodes];
/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
923 /* Can store U8_MAX [0:254] nodes. */
924 if (node_id < nr_vmap_nodes)
925 return (node_id + 1) << BITS_PER_BYTE;
927 /* Warn and no node encoded. */
928 WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
/*
 * Returns the decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
940 unsigned int node_id = (val >> BITS_PER_BYTE) - 1;
942 /* Can store U8_MAX [0:254] nodes. */
943 if (node_id < nr_vmap_nodes)
946 /* If it was _not_ zero, warn. */
947 WARN_ONCE(node_id != UINT_MAX,
948 "Decode wrong node id (%d)\n", node_id);
950 return nr_vmap_nodes;
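/*
 * Worked example (values assumed for illustration): with nr_vmap_nodes == 4,
 * encode_vn_id(2) yields (2 + 1) << BITS_PER_BYTE == 0x300, and
 * decode_vn_id(0x300) recovers (0x300 >> BITS_PER_BYTE) - 1 == 2. A raw
 * value of 0 ("no node") decodes to UINT_MAX and maps to nr_vmap_nodes.
 */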
954 is_vn_id_valid(unsigned int node_id)
956 if (node_id < nr_vmap_nodes)
962 static __always_inline unsigned long
963 va_size(struct vmap_area *va)
965 return (va->va_end - va->va_start);
968 static __always_inline unsigned long
969 get_subtree_max_size(struct rb_node *node)
971 struct vmap_area *va;
973 va = rb_entry_safe(node, struct vmap_area, rb_node);
974 return va ? va->subtree_max_size : 0;
977 RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
978 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
980 static void reclaim_and_purge_vmap_areas(void);
981 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
982 static void drain_vmap_area_work(struct work_struct *work);
983 static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);
985 static atomic_long_t nr_vmalloc_pages;
987 unsigned long vmalloc_nr_pages(void)
989 return atomic_long_read(&nr_vmalloc_pages);
992 static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
994 struct rb_node *n = root->rb_node;
996 addr = (unsigned long)kasan_reset_tag((void *)addr);
999 struct vmap_area *va;
1001 va = rb_entry(n, struct vmap_area, rb_node);
1002 if (addr < va->va_start)
1004 else if (addr >= va->va_end)
1013 /* Look up the first VA which satisfies addr < va_end, NULL if none. */
1014 static struct vmap_area *
1015 __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
1017 struct vmap_area *va = NULL;
1018 struct rb_node *n = root->rb_node;
1020 addr = (unsigned long)kasan_reset_tag((void *)addr);
1023 struct vmap_area *tmp;
1025 tmp = rb_entry(n, struct vmap_area, rb_node);
1026 if (tmp->va_end > addr) {
1028 if (tmp->va_start <= addr)
/*
 * Returns the node where the first VA that satisfies addr < va_end resides.
 * On success the node is locked. The caller is responsible for unlocking it
 * when the VA is no longer needed to be accessed.
 *
 * Returns NULL if nothing is found.
 */
1046 static struct vmap_node *
1047 find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
1049 unsigned long va_start_lowest;
1050 struct vmap_node *vn;
1054 for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
1055 vn = &vmap_nodes[i];
1057 spin_lock(&vn->busy.lock);
1058 *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
1061 if (!va_start_lowest || (*va)->va_start < va_start_lowest)
1062 va_start_lowest = (*va)->va_start;
1063 spin_unlock(&vn->busy.lock);
1067 * Check if found VA exists, it might have gone away. In this case we
1068 * repeat the search because a VA has been removed concurrently and we
1069 * need to proceed to the next one, which is a rare case.
1071 if (va_start_lowest) {
1072 vn = addr_to_node(va_start_lowest);
1074 spin_lock(&vn->busy.lock);
1075 *va = __find_vmap_area(va_start_lowest, &vn->busy.root);
1080 spin_unlock(&vn->busy.lock);
/*
 * This function returns the address of the parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding insertion of the conflicting/overlapping
 * range have to be declined and are considered a bug.
 */
1095 static __always_inline struct rb_node **
1096 find_va_links(struct vmap_area *va,
1097 struct rb_root *root, struct rb_node *from,
1098 struct rb_node **parent)
1100 struct vmap_area *tmp_va;
1101 struct rb_node **link;
1104 link = &root->rb_node;
1105 if (unlikely(!*link)) {
	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which I name "link", where the new va->rb_node will be
	 * attached to.
	 */
1119 tmp_va = rb_entry(*link, struct vmap_area, rb_node);
		/*
		 * During the traversal we also do some sanity check.
		 * Trigger a WARN() if the new range overlaps an
		 * existing one on either side or fully.
		 */
1126 if (va->va_end <= tmp_va->va_start)
1127 link = &(*link)->rb_left;
1128 else if (va->va_start >= tmp_va->va_end)
1129 link = &(*link)->rb_right;
1131 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
1132 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
1138 *parent = &tmp_va->rb_node;
1142 static __always_inline struct list_head *
1143 get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
1145 struct list_head *list;
	if (unlikely(!parent)) {
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;
	}

1156 list = &rb_entry(parent, struct vmap_area, rb_node)->list;
1157 return (&parent->rb_right == link ? list->next : list);
1160 static __always_inline void
1161 __link_va(struct vmap_area *va, struct rb_root *root,
1162 struct rb_node *parent, struct rb_node **link,
1163 struct list_head *head, bool augment)
1166 * VA is still not in the list, but we can
1167 * identify its future previous list_head node.
1169 if (likely(parent)) {
1170 head = &rb_entry(parent, struct vmap_area, rb_node)->list;
1171 if (&parent->rb_right != link)
1175 /* Insert to the rb-tree */
1176 rb_link_node(&va->rb_node, parent, link);
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
1189 rb_insert_augmented(&va->rb_node,
1190 root, &free_vmap_area_rb_augment_cb);
1191 va->subtree_max_size = 0;
1193 rb_insert_color(&va->rb_node, root);
1196 /* Address-sort this list */
1197 list_add(&va->list, head);
1200 static __always_inline void
1201 link_va(struct vmap_area *va, struct rb_root *root,
1202 struct rb_node *parent, struct rb_node **link,
1203 struct list_head *head)
1205 __link_va(va, root, parent, link, head, false);
1208 static __always_inline void
1209 link_va_augment(struct vmap_area *va, struct rb_root *root,
1210 struct rb_node *parent, struct rb_node **link,
1211 struct list_head *head)
1213 __link_va(va, root, parent, link, head, true);
1216 static __always_inline void
1217 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
1219 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1223 rb_erase_augmented(&va->rb_node,
1224 root, &free_vmap_area_rb_augment_cb);
1226 rb_erase(&va->rb_node, root);
1228 list_del_init(&va->list);
1229 RB_CLEAR_NODE(&va->rb_node);
1232 static __always_inline void
1233 unlink_va(struct vmap_area *va, struct rb_root *root)
1235 __unlink_va(va, root, false);
1238 static __always_inline void
1239 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1241 __unlink_va(va, root, true);
1244 #if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when a node is removed and the tree is rotated.
 */
1248 static __always_inline unsigned long
1249 compute_subtree_max_size(struct vmap_area *va)
1251 return max3(va_size(va),
1252 get_subtree_max_size(va->rb_node.rb_left),
1253 get_subtree_max_size(va->rb_node.rb_right));
1257 augment_tree_propagate_check(void)
1259 struct vmap_area *va;
1260 unsigned long computed_size;
1262 list_for_each_entry(va, &free_vmap_area_list, list) {
1263 computed_size = compute_subtree_max_size(va);
1264 if (computed_size != va->subtree_max_size)
1265 pr_emerg("tree is corrupted: %lu, %lu\n",
1266 va_size(va), va->subtree_max_size);
/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      2--2  8--8
 *
 * For example if we modify node 4, shrinking it to 2, then
 * no modification is required. If we shrink node 2 to 1
 * its subtree_max_size is updated only, and set to 1. If we shrink
 * node 8 to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
1298 static __always_inline void
1299 augment_tree_propagate_from(struct vmap_area *va)
1302 * Populate the tree from bottom towards the root until
1303 * the calculated maximum available size of checked node
1304 * is equal to its current one.
1306 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1308 #if DEBUG_AUGMENT_PROPAGATE_CHECK
1309 augment_tree_propagate_check();
1314 insert_vmap_area(struct vmap_area *va,
1315 struct rb_root *root, struct list_head *head)
1317 struct rb_node **link;
1318 struct rb_node *parent;
1320 link = find_va_links(va, root, NULL, &parent);
1322 link_va(va, root, parent, link, head);
1326 insert_vmap_area_augment(struct vmap_area *va,
1327 struct rb_node *from, struct rb_root *root,
1328 struct list_head *head)
1330 struct rb_node **link;
1331 struct rb_node *parent;
1334 link = find_va_links(va, NULL, from, &parent);
1336 link = find_va_links(va, root, NULL, &parent);
1339 link_va_augment(va, root, parent, link, head);
1340 augment_tree_propagate_from(va);
/*
 * Merge a de-allocated chunk of VA memory with previous
 * and next free blocks. If coalescing is not done a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Despite being a
 * buggy behaviour, the system can stay alive and keep
 * working.
 */
1355 static __always_inline struct vmap_area *
1356 __merge_or_add_vmap_area(struct vmap_area *va,
1357 struct rb_root *root, struct list_head *head, bool augment)
1359 struct vmap_area *sibling;
1360 struct list_head *next;
1361 struct rb_node **link;
1362 struct rb_node *parent;
1363 bool merged = false;
1366 * Find a place in the tree where VA potentially will be
1367 * inserted, unless it is merged with its sibling/siblings.
1369 link = find_va_links(va, root, NULL, &parent);
1374 * Get next node of VA to check if merging can be done.
1376 next = get_va_next_sibling(parent, link);
1377 if (unlikely(next == NULL))
1383 * |<------VA------>|<-----Next----->|
1388 sibling = list_entry(next, struct vmap_area, list);
1389 if (sibling->va_start == va->va_end) {
1390 sibling->va_start = va->va_start;
1392 /* Free vmap_area object. */
1393 kmem_cache_free(vmap_area_cachep, va);
1395 /* Point to the new merged area. */
1404 * |<-----Prev----->|<------VA------>|
1408 if (next->prev != head) {
1409 sibling = list_entry(next->prev, struct vmap_area, list);
1410 if (sibling->va_end == va->va_start) {
1412 * If both neighbors are coalesced, it is important
1413 * to unlink the "next" node first, followed by merging
1414 * with "previous" one. Otherwise the tree might not be
1415 * fully populated if a sibling's augmented value is
1416 * "normalized" because of rotation operations.
1419 __unlink_va(va, root, augment);
1421 sibling->va_end = va->va_end;
1423 /* Free vmap_area object. */
1424 kmem_cache_free(vmap_area_cachep, va);
1426 /* Point to the new merged area. */
1434 __link_va(va, root, parent, link, head, augment);
1439 static __always_inline struct vmap_area *
1440 merge_or_add_vmap_area(struct vmap_area *va,
1441 struct rb_root *root, struct list_head *head)
1443 return __merge_or_add_vmap_area(va, root, head, false);
1446 static __always_inline struct vmap_area *
1447 merge_or_add_vmap_area_augment(struct vmap_area *va,
1448 struct rb_root *root, struct list_head *head)
1450 va = __merge_or_add_vmap_area(va, root, head, true);
1452 augment_tree_propagate_from(va);
1457 static __always_inline bool
1458 is_within_this_va(struct vmap_area *va, unsigned long size,
1459 unsigned long align, unsigned long vstart)
1461 unsigned long nva_start_addr;
1463 if (va->va_start > vstart)
1464 nva_start_addr = ALIGN(va->va_start, align);
1466 nva_start_addr = ALIGN(vstart, align);
1468 /* Can be overflowed due to big size or alignment. */
1469 if (nva_start_addr + size < nva_start_addr ||
1470 nva_start_addr < vstart)
1473 return (nva_start_addr + size <= va->va_end);
/*
 * Find the first free block (lowest start address) in the tree
 * that will accomplish the request corresponding to the passed
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * the search length is adjusted to account for the worst case
 * alignment overhead.
 */
1483 static __always_inline struct vmap_area *
1484 find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1485 unsigned long align, unsigned long vstart, bool adjust_search_size)
1487 struct vmap_area *va;
1488 struct rb_node *node;
1489 unsigned long length;
1491 /* Start from the root. */
1492 node = root->rb_node;
1494 /* Adjust the search size for alignment overhead. */
1495 length = adjust_search_size ? size + align - 1 : size;
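	/*
	 * For example, an 8 KiB request with a 16 KiB alignment searches for
	 * a free block of at least 8 KiB + 16 KiB - 1 bytes, so that some
	 * suitably aligned start address is guaranteed to fit inside it.
	 */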
1498 va = rb_entry(node, struct vmap_area, rb_node);
1500 if (get_subtree_max_size(node->rb_left) >= length &&
1501 vstart < va->va_start) {
1502 node = node->rb_left;
1504 if (is_within_this_va(va, size, align, vstart))
1508 * Does not make sense to go deeper towards the right
1509 * sub-tree if it does not have a free block that is
1510 * equal or bigger to the requested search length.
1512 if (get_subtree_max_size(node->rb_right) >= length) {
1513 node = node->rb_right;
		/*
		 * OK. We roll back and find the first right sub-tree
		 * that will satisfy the search criteria. It can happen
		 * due to a "vstart" restriction or an alignment overhead
		 * that is bigger than PAGE_SIZE.
		 */
1523 while ((node = rb_parent(node))) {
1524 va = rb_entry(node, struct vmap_area, rb_node);
1525 if (is_within_this_va(va, size, align, vstart))
1528 if (get_subtree_max_size(node->rb_right) >= length &&
1529 vstart <= va->va_start) {
				/*
				 * Shift the vstart forward. Please note, we update it with
				 * the parent's start address adding "1" because we do not want
				 * to enter the same sub-tree after it has already been checked
				 * and no suitable free block was found there.
				 */
1536 vstart = va->va_start + 1;
1537 node = node->rb_right;
1547 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1548 #include <linux/random.h>
1550 static struct vmap_area *
1551 find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
1552 unsigned long align, unsigned long vstart)
1554 struct vmap_area *va;
1556 list_for_each_entry(va, head, list) {
1557 if (!is_within_this_va(va, size, align, vstart))
1567 find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
1568 unsigned long size, unsigned long align)
1570 struct vmap_area *va_1, *va_2;
1571 unsigned long vstart;
1574 get_random_bytes(&rnd, sizeof(rnd));
1575 vstart = VMALLOC_START + rnd;
1577 va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
1578 va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
1581 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1582 va_1, va_2, vstart);
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};
1594 static __always_inline enum fit_type
1595 classify_va_fit_type(struct vmap_area *va,
1596 unsigned long nva_start_addr, unsigned long size)
1600 /* Check if it is within VA. */
1601 if (nva_start_addr < va->va_start ||
1602 nva_start_addr + size > va->va_end)
1606 if (va->va_start == nva_start_addr) {
1607 if (va->va_end == nva_start_addr + size)
1611 } else if (va->va_end == nva_start_addr + size) {
1620 static __always_inline int
1621 va_clip(struct rb_root *root, struct list_head *head,
1622 struct vmap_area *va, unsigned long nva_start_addr,
1625 struct vmap_area *lva = NULL;
1626 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1628 if (type == FL_FIT_TYPE) {
1630 * No need to split VA, it fully fits.
1636 unlink_va_augment(va, root);
1637 kmem_cache_free(vmap_area_cachep, va);
1638 } else if (type == LE_FIT_TYPE) {
1640 * Split left edge of fit VA.
1646 va->va_start += size;
1647 } else if (type == RE_FIT_TYPE) {
1649 * Split right edge of fit VA.
1655 va->va_end = nva_start_addr;
1656 } else if (type == NE_FIT_TYPE) {
1658 * Split no edge of fit VA.
1664 lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1665 if (unlikely(!lva)) {
1667 * For percpu allocator we do not do any pre-allocation
1668 * and leave it as it is. The reason is it most likely
1669 * never ends up with NE_FIT_TYPE splitting. In case of
1670 * percpu allocations offsets and sizes are aligned to
1671 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
1672 * are its main fitting cases.
1674 * There are a few exceptions though, as an example it is
1675 * a first allocation (early boot up) when we have "one"
1676 * big free space that has to be split.
1678 * Also we can hit this path in case of regular "vmap"
1679 * allocations, if "this" current CPU was not preloaded.
1680 * See the comment in alloc_vmap_area() why. If so, then
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purpose. That is rare and most of the time does
			 * not occur in practice.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in the alloc_vmap_area() function.
			 */
1691 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1697 * Build the remainder.
1699 lva->va_start = va->va_start;
1700 lva->va_end = nva_start_addr;
1703 * Shrink this VA to remaining size.
1705 va->va_start = nva_start_addr + size;
1710 if (type != FL_FIT_TYPE) {
1711 augment_tree_propagate_from(va);
1713 if (lva) /* type == NE_FIT_TYPE */
1714 insert_vmap_area_augment(lva, &va->rb_node, root, head);
1720 static unsigned long
1721 va_alloc(struct vmap_area *va,
1722 struct rb_root *root, struct list_head *head,
1723 unsigned long size, unsigned long align,
1724 unsigned long vstart, unsigned long vend)
1726 unsigned long nva_start_addr;
1729 if (va->va_start > vstart)
1730 nva_start_addr = ALIGN(va->va_start, align);
1732 nva_start_addr = ALIGN(vstart, align);
1734 /* Check the "vend" restriction. */
1735 if (nva_start_addr + size > vend)
1738 /* Update the free vmap_area. */
1739 ret = va_clip(root, head, va, nva_start_addr, size);
1740 if (WARN_ON_ONCE(ret))
1743 return nva_start_addr;
/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, vend is returned to indicate failure.
 */
1750 static __always_inline unsigned long
1751 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1752 unsigned long size, unsigned long align,
1753 unsigned long vstart, unsigned long vend)
1755 bool adjust_search_size = true;
1756 unsigned long nva_start_addr;
1757 struct vmap_area *va;
	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks (their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where the requested size corresponds exactly to
	 *      the specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With an adjusted search length such an allocation would not succeed.
	 */
1768 if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
1769 adjust_search_size = false;
1771 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1775 nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
1776 if (nva_start_addr == vend)
1779 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1780 find_vmap_lowest_match_check(root, head, size, align);
1783 return nva_start_addr;
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
1789 static void free_vmap_area(struct vmap_area *va)
1791 struct vmap_node *vn = addr_to_node(va->va_start);
1794 * Remove from the busy tree/list.
1796 spin_lock(&vn->busy.lock);
1797 unlink_va(va, &vn->busy.root);
1798 spin_unlock(&vn->busy.lock);
1801 * Insert/Merge it back to the free tree/list.
1803 spin_lock(&free_vmap_area_lock);
1804 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1805 spin_unlock(&free_vmap_area_lock);
1809 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1811 struct vmap_area *va = NULL;
1814 * Preload this CPU with one extra vmap_area object. It is used
1815 * when fit type of free area is NE_FIT_TYPE. It guarantees that
1816 * a CPU that does an allocation is preloaded.
1818 * We do it in non-atomic context, thus it allows us to use more
1819 * permissive allocation masks to be more stable under low memory
1820 * condition and high memory pressure.
1822 if (!this_cpu_read(ne_fit_preload_node))
1823 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1827 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1828 kmem_cache_free(vmap_area_cachep, va);
1831 static struct vmap_pool *
1832 size_to_va_pool(struct vmap_node *vn, unsigned long size)
1834 unsigned int idx = (size - 1) / PAGE_SIZE;
1836 if (idx < MAX_VA_SIZE_PAGES)
1837 return &vn->pool[idx];
1843 node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
1845 struct vmap_pool *vp;
1847 vp = size_to_va_pool(n, va_size(va));
1851 spin_lock(&n->pool_lock);
1852 list_add(&va->list, &vp->head);
1853 WRITE_ONCE(vp->len, vp->len + 1);
1854 spin_unlock(&n->pool_lock);
1859 static struct vmap_area *
1860 node_pool_del_va(struct vmap_node *vn, unsigned long size,
1861 unsigned long align, unsigned long vstart,
1864 struct vmap_area *va = NULL;
1865 struct vmap_pool *vp;
1868 vp = size_to_va_pool(vn, size);
1869 if (!vp || list_empty(&vp->head))
1872 spin_lock(&vn->pool_lock);
1873 if (!list_empty(&vp->head)) {
1874 va = list_first_entry(&vp->head, struct vmap_area, list);
1876 if (IS_ALIGNED(va->va_start, align)) {
1878 * Do some sanity check and emit a warning
1879 * if one of below checks detects an error.
1881 err |= (va_size(va) != size);
1882 err |= (va->va_start < vstart);
1883 err |= (va->va_end > vend);
1885 if (!WARN_ON_ONCE(err)) {
1886 list_del_init(&va->list);
1887 WRITE_ONCE(vp->len, vp->len - 1);
1892 list_move_tail(&va->list, &vp->head);
1896 spin_unlock(&vn->pool_lock);
1901 static struct vmap_area *
1902 node_alloc(unsigned long size, unsigned long align,
1903 unsigned long vstart, unsigned long vend,
1904 unsigned long *addr, unsigned int *vn_id)
1906 struct vmap_area *va;
	/*
	 * Fallback to a global heap if not vmalloc or there
	 * is only one node.
	 */
1915 if (vstart != VMALLOC_START || vend != VMALLOC_END ||
1919 *vn_id = raw_smp_processor_id() % nr_vmap_nodes;
1920 va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
1921 *vn_id = encode_vn_id(*vn_id);
1924 *addr = va->va_start;
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
1933 static struct vmap_area *alloc_vmap_area(unsigned long size,
1934 unsigned long align,
1935 unsigned long vstart, unsigned long vend,
1936 int node, gfp_t gfp_mask,
1937 unsigned long va_flags)
1939 struct vmap_node *vn;
1940 struct vmap_area *va;
1941 unsigned long freed;
1947 if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
1948 return ERR_PTR(-EINVAL);
1950 if (unlikely(!vmap_initialized))
1951 return ERR_PTR(-EBUSY);
	/*
	 * If a VA is obtained from a global heap (if it fails here)
	 * it is anyway marked with this "vn_id" so it is returned
	 * to this pool's node later. This gives a possibility to
	 * populate pools based on users' demand.
	 *
	 * On success a ready-to-go VA is returned.
	 */
1963 va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
1965 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1967 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1969 return ERR_PTR(-ENOMEM);
1972 * Only scan the relevant parts containing pointers to other objects
1973 * to avoid false negatives.
1975 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1980 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1981 addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1982 size, align, vstart, vend);
1983 spin_unlock(&free_vmap_area_lock);
1986 trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1989 * If an allocation fails, the "vend" address is
1990 * returned. Therefore trigger the overflow path.
1992 if (unlikely(addr == vend))
1995 va->va_start = addr;
1996 va->va_end = addr + size;
1998 va->flags = (va_flags | vn_id);
2000 vn = addr_to_node(va->va_start);
2002 spin_lock(&vn->busy.lock);
2003 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
2004 spin_unlock(&vn->busy.lock);
2006 BUG_ON(!IS_ALIGNED(va->va_start, align));
2007 BUG_ON(va->va_start < vstart);
2008 BUG_ON(va->va_end > vend);
2010 ret = kasan_populate_vmalloc(addr, size);
2013 return ERR_PTR(ret);
2020 reclaim_and_purge_vmap_areas();
2026 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
2033 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2034 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
2037 kmem_cache_free(vmap_area_cachep, va);
2038 return ERR_PTR(-EBUSY);
2041 int register_vmap_purge_notifier(struct notifier_block *nb)
2043 return blocking_notifier_chain_register(&vmap_notify_list, nb);
2045 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
2047 int unregister_vmap_purge_notifier(struct notifier_block *nb)
2049 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
2051 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
2054 * lazy_max_pages is the maximum amount of virtual address space we gather up
2055 * before attempting to purge with a TLB flush.
2057 * There is a tradeoff here: a larger number will cover more kernel page tables
2058 * and take slightly longer to purge, but it will linearly reduce the number of
2059 * global TLB flushes that must be performed. It would seem natural to scale
2060 * this number up linearly with the number of CPUs (because vmapping activity
2061 * could also scale linearly with the number of CPUs), however it is likely
2062 * that in practice, workloads might be constrained in other ways that mean
2063 * vmap activity will not scale linearly with CPUs. Also, I want to be
2064 * conservative and not introduce a big latency on huge systems, so go with
2065 * a less aggressive log scale. It will still be an improvement over the old
2066 * code, and it will be simple to change the scale factor if we find that it
2067 * becomes a problem on bigger systems.
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());
	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
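/*
 * Worked example (assuming 4 KiB pages): with 16 CPUs online,
 * fls(16) == 5, so lazy_max_pages() == 5 * 8192 == 40960 pages,
 * i.e. roughly 160 MB of lazily freed space may accumulate before
 * draining is scheduled.
 */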
2078 static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
2081 * Serialize vmap purging. There is no actual critical section protected
2082 * by this lock, but we want to avoid concurrent calls for performance
2083 * reasons and to make the pcpu_get_vm_areas more deterministic.
2085 static DEFINE_MUTEX(vmap_purge_lock);
2087 /* for per-CPU blocks */
2088 static void purge_fragmented_blocks_allcpus(void);
2089 static cpumask_t purge_nodes;
2092 reclaim_list_global(struct list_head *head)
2094 struct vmap_area *va, *n;
2096 if (list_empty(head))
2099 spin_lock(&free_vmap_area_lock);
2100 list_for_each_entry_safe(va, n, head, list)
2101 merge_or_add_vmap_area_augment(va,
2102 &free_vmap_area_root, &free_vmap_area_list);
2103 spin_unlock(&free_vmap_area_lock);
2107 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2109 struct vmap_area *va, *nva;
2110 struct list_head decay_list;
2111 struct rb_root decay_root;
2112 unsigned long n_decay;
2115 decay_root = RB_ROOT;
2116 INIT_LIST_HEAD(&decay_list);
2118 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2119 struct list_head tmp_list;
2121 if (list_empty(&vn->pool[i].head))
2124 INIT_LIST_HEAD(&tmp_list);
2126 /* Detach the pool, so no-one can access it. */
2127 spin_lock(&vn->pool_lock);
2128 list_replace_init(&vn->pool[i].head, &tmp_list);
2129 spin_unlock(&vn->pool_lock);
2132 WRITE_ONCE(vn->pool[i].len, 0);
		/* Decay a pool by ~25% out of the remaining objects. */
2135 n_decay = vn->pool[i].len >> 2;
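		/*
		 * For a partial decay this releases about a quarter of the pool,
		 * e.g. a pool holding 20 VAs releases 5 and keeps 15; a full
		 * decay drops every cached VA.
		 */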
2137 list_for_each_entry_safe(va, nva, &tmp_list, list) {
2138 list_del_init(&va->list);
2139 merge_or_add_vmap_area(va, &decay_root, &decay_list);
2142 WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
		/*
		 * Attach the pool back if it has been partly decayed.
		 * Please note, it is assumed that nobody (other contexts)
		 * can populate the pool, therefore a simple list-replace
		 * operation takes place here.
		 */
2155 if (!full_decay && !list_empty(&tmp_list)) {
2156 spin_lock(&vn->pool_lock);
2157 list_replace_init(&tmp_list, &vn->pool[i].head);
2158 spin_unlock(&vn->pool_lock);
2162 reclaim_list_global(&decay_list);
2165 static void purge_vmap_node(struct work_struct *work)
2167 struct vmap_node *vn = container_of(work,
2168 struct vmap_node, purge_work);
2169 struct vmap_area *va, *n_va;
2170 LIST_HEAD(local_list);
2174 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2175 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
2176 unsigned long orig_start = va->va_start;
2177 unsigned long orig_end = va->va_end;
2178 unsigned int vn_id = decode_vn_id(va->flags);
2180 list_del_init(&va->list);
2182 if (is_vmalloc_or_module_addr((void *)orig_start))
2183 kasan_release_vmalloc(orig_start, orig_end,
2184 va->va_start, va->va_end);
2186 atomic_long_sub(nr, &vmap_lazy_nr);
2189 if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2190 if (node_pool_add_va(vn, va))
2193 /* Go back to global. */
2194 list_add(&va->list, &local_list);
2197 reclaim_list_global(&local_list);
2201 * Purges all lazily-freed vmap areas.
2203 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2204 bool full_pool_decay)
2206 unsigned long nr_purged_areas = 0;
2207 unsigned int nr_purge_helpers;
2208 unsigned int nr_purge_nodes;
2209 struct vmap_node *vn;
2212 lockdep_assert_held(&vmap_purge_lock);
2215 * Use cpumask to mark which node has to be processed.
2217 purge_nodes = CPU_MASK_NONE;
2219 for (i = 0; i < nr_vmap_nodes; i++) {
2220 vn = &vmap_nodes[i];
2222 INIT_LIST_HEAD(&vn->purge_list);
2223 vn->skip_populate = full_pool_decay;
2224 decay_va_pool_node(vn, full_pool_decay);
2226 if (RB_EMPTY_ROOT(&vn->lazy.root))
2229 spin_lock(&vn->lazy.lock);
2230 WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2231 list_replace_init(&vn->lazy.head, &vn->purge_list);
2232 spin_unlock(&vn->lazy.lock);
2234 start = min(start, list_first_entry(&vn->purge_list,
2235 struct vmap_area, list)->va_start);
2237 end = max(end, list_last_entry(&vn->purge_list,
2238 struct vmap_area, list)->va_end);
2240 cpumask_set_cpu(i, &purge_nodes);
2243 nr_purge_nodes = cpumask_weight(&purge_nodes);
2244 if (nr_purge_nodes > 0) {
2245 flush_tlb_kernel_range(start, end);
		/* One extra worker is used per full lazy_max_pages() set, minus one. */
2248 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
2249 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
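		/*
		 * For example, three full lazy_max_pages() sets pending and
		 * eight purge nodes give clamp(3, 1, 8) - 1 == 2 extra helpers.
		 */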
2251 for_each_cpu(i, &purge_nodes) {
2252 vn = &vmap_nodes[i];
2254 if (nr_purge_helpers > 0) {
2255 INIT_WORK(&vn->purge_work, purge_vmap_node);
2257 if (cpumask_test_cpu(i, cpu_online_mask))
2258 schedule_work_on(i, &vn->purge_work);
2260 schedule_work(&vn->purge_work);
2264 vn->purge_work.func = NULL;
2265 purge_vmap_node(&vn->purge_work);
2266 nr_purged_areas += vn->nr_purged;
2270 for_each_cpu(i, &purge_nodes) {
2271 vn = &vmap_nodes[i];
2273 if (vn->purge_work.func) {
2274 flush_work(&vn->purge_work);
2275 nr_purged_areas += vn->nr_purged;
2280 trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2281 return nr_purged_areas > 0;
2285 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2287 static void reclaim_and_purge_vmap_areas(void)
2290 mutex_lock(&vmap_purge_lock);
2291 purge_fragmented_blocks_allcpus();
2292 __purge_vmap_area_lazy(ULONG_MAX, 0, true);
2293 mutex_unlock(&vmap_purge_lock);
2296 static void drain_vmap_area_work(struct work_struct *work)
2298 mutex_lock(&vmap_purge_lock);
2299 __purge_vmap_area_lazy(ULONG_MAX, 0, false);
2300 mutex_unlock(&vmap_purge_lock);
/*
 * Free a vmap area, caller ensuring that the area has been unmapped,
 * unlinked and flush_cache_vunmap had been called for the correct
 * range before.
 */
2308 static void free_vmap_area_noflush(struct vmap_area *va)
2310 unsigned long nr_lazy_max = lazy_max_pages();
2311 unsigned long va_start = va->va_start;
2312 unsigned int vn_id = decode_vn_id(va->flags);
2313 struct vmap_node *vn;
2314 unsigned long nr_lazy;
2316 if (WARN_ON_ONCE(!list_empty(&va->list)))
2319 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
2320 PAGE_SHIFT, &vmap_lazy_nr);
	/*
	 * If it was requested by a certain node we would like to
	 * return it to that node, i.e. its pool for later reuse.
	 */
2326 vn = is_vn_id_valid(vn_id) ?
2327 id_to_node(vn_id):addr_to_node(va->va_start);
2329 spin_lock(&vn->lazy.lock);
2330 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2331 spin_unlock(&vn->lazy.lock);
2333 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2335 /* After this point, we may free va at any time */
2336 if (unlikely(nr_lazy > nr_lazy_max))
2337 schedule_work(&drain_vmap_work);
2341 * Free and unmap a vmap area
2343 static void free_unmap_vmap_area(struct vmap_area *va)
2345 flush_cache_vunmap(va->va_start, va->va_end);
2346 vunmap_range_noflush(va->va_start, va->va_end);
2347 if (debug_pagealloc_enabled_static())
2348 flush_tlb_kernel_range(va->va_start, va->va_end);
2350 free_vmap_area_noflush(va);
2353 struct vmap_area *find_vmap_area(unsigned long addr)
2355 struct vmap_node *vn;
2356 struct vmap_area *va;
2359 if (unlikely(!vmap_initialized))
	/*
	 * An addr_to_node_id(addr) converts an address to a node index
	 * where a VA is located. If a VA spans several zones and the passed
	 * addr is not the same as va->va_start, which is not common, we
	 * may need to scan extra nodes. See an example:
	 *
	 *      <----va---->
	 * -|-----|-----|-----|-----|-
	 *     1     2     0     1
	 *
	 * VA resides in node 1 whereas it spans 1, 2 and 0. If the passed
	 * addr is within node 2 or 0 we should do extra work.
	 */
2375 i = j = addr_to_node_id(addr);
2377 vn = &vmap_nodes[i];
2379 spin_lock(&vn->busy.lock);
2380 va = __find_vmap_area(addr, &vn->busy.root);
2381 spin_unlock(&vn->busy.lock);
2385 } while ((i = (i + 1) % nr_vmap_nodes) != j);
2390 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2392 struct vmap_node *vn;
2393 struct vmap_area *va;
2397 * Check the comment in the find_vmap_area() about the loop.
2399 i = j = addr_to_node_id(addr);
2401 vn = &vmap_nodes[i];
2403 spin_lock(&vn->busy.lock);
2404 va = __find_vmap_area(addr, &vn->busy.root);
2406 unlink_va(va, &vn->busy.root);
2407 spin_unlock(&vn->busy.lock);
2411 } while ((i = (i + 1) % nr_vmap_nodes) != j);
2416 /*** Per cpu kva allocator ***/
2419 * vmap space is limited especially on 32 bit architectures. Ensure there is
2420 * room for at least 16 percpu vmap blocks per CPU.
2423 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2424 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
2425 * instead (we just need a rough idea)
2427 #if BITS_PER_LONG == 32
2428 #define VMALLOC_SPACE (128UL*1024*1024)
2430 #define VMALLOC_SPACE (128UL*1024*1024*1024)
2433 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
2434 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
2435 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
2436 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
2437 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
2438 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
2439 #define VMAP_BBMAP_BITS \
2440 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
2441 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
2442 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2444 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
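/*
 * Worked example of the sizing above (illustrative; the exact numbers
 * depend on the config): on a 64-bit kernel with 4K pages and
 * NR_CPUS = 64, VMALLOC_SPACE is guessed as 128G, so VMALLOC_PAGES is
 * 32M. 32M / roundup_pow_of_two(64) / 16 = 32768 bits, which is clamped
 * by VMAP_BBMAP_BITS_MAX to 1024 bits, i.e. VMAP_BLOCK_SIZE = 4M. On a
 * 32-bit kernel with 4 CPUs the same formula gives 32768 / 4 / 16 = 512
 * bits, i.e. 2M blocks.
 */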
2447 * Purge threshold to prevent overeager purging of fragmented blocks for
2448 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2450 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
2452 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
2453 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
2454 #define VMAP_FLAGS_MASK 0x3
2456 struct vmap_block_queue {
2458 struct list_head free;
2461 * An xarray requires extra memory to be allocated dynamically.
2462 * If that is an issue, an rb-tree can be used instead.
2465 struct xarray vmap_blocks;
2470 struct vmap_area *va;
2471 unsigned long free, dirty;
2472 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2473 unsigned long dirty_min, dirty_max; /*< dirty range */
2474 struct list_head free_list;
2475 struct rcu_head rcu_head;
2476 struct list_head purge;
2479 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2480 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2483 * In order to get fast access to any "vmap_block" associated with a
2484 * specific address, we use a hash.
2486 * A per-cpu vmap_block_queue is used in two ways: it serializes
2487 * access to the free block chains among CPUs (alloc path) and it
2488 * also acts as a vmap_block hash (alloc/free paths). In other words we
2489 * overload it, since we already have a per-cpu array that can be
2490 * used as a hash table. When used as a hash, the 'cpu' passed to
2491 * per_cpu() is not actually a CPU but rather a hash index.
2493 * The hash function is addr_to_vb_xa(), which hashes any address
2494 * to the specific hash index it belongs to. per_cpu() is then used
2495 * to access the array with the generated index.
2502 * 0 10 20 30 40 50 60
2503 * |------|------|------|------|------|------|...<vmap address space>
2504 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
2506 * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2507 *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2509 * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2510 *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2512 * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2513 *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2515 * This technique almost always avoids lock contention on insert/remove,
2516 * however xarray spinlocks protect against any contention that remains.
2518 static struct xarray *
2519 addr_to_vb_xa(unsigned long addr)
2521 int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
2523 return &per_cpu(vmap_block_queue, index).vmap_blocks;
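/*
 * Illustrative sketch of the hashing (the address and CPU count are made
 * up): with 4M blocks and num_possible_cpus() == 4, an address is
 * assigned to an xarray purely by which block-sized slot it falls into:
 *
 *	addr  = 0x01400000;                     // 20M, i.e. block number 5
 *	index = (addr / VMAP_BLOCK_SIZE) % 4;   // 5 % 4 == 1
 *	xa    = &per_cpu(vmap_block_queue, 1).vmap_blocks;
 *
 * so a block that was allocated on CPU 2 may well be looked up and
 * freed through the xarray embedded in CPU 1's vmap_block_queue.
 */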
2527 * We should probably have a fallback mechanism to allocate virtual memory
2528 * out of partially filled vmap blocks. However, vmap block sizing should be
2529 * fairly reasonable given the vmalloc size, so it shouldn't be a problem.
2533 static unsigned long addr_to_vb_idx(unsigned long addr)
2535 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2536 addr /= VMAP_BLOCK_SIZE;
2540 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2544 addr = va_start + (pages_off << PAGE_SHIFT);
2545 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2546 return (void *)addr;
2550 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it.
2551 *                  The number of pages cannot exceed VMAP_BBMAP_BITS.
2552 * @order:    how many 2^order pages should be occupied in the newly allocated block
2553 * @gfp_mask: flags for the page level allocator
2555 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2557 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2559 struct vmap_block_queue *vbq;
2560 struct vmap_block *vb;
2561 struct vmap_area *va;
2563 unsigned long vb_idx;
2567 node = numa_node_id();
2569 vb = kmalloc_node(sizeof(struct vmap_block),
2570 gfp_mask & GFP_RECLAIM_MASK, node);
2572 return ERR_PTR(-ENOMEM);
2574 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2575 VMALLOC_START, VMALLOC_END,
2577 VMAP_RAM|VMAP_BLOCK);
2580 return ERR_CAST(va);
2583 vaddr = vmap_block_vaddr(va->va_start, 0);
2584 spin_lock_init(&vb->lock);
2586 /* At least something should be left free */
2587 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2588 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2589 vb->free = VMAP_BBMAP_BITS - (1UL << order);
2591 vb->dirty_min = VMAP_BBMAP_BITS;
2593 bitmap_set(vb->used_map, 0, (1UL << order));
2594 INIT_LIST_HEAD(&vb->free_list);
2596 xa = addr_to_vb_xa(va->va_start);
2597 vb_idx = addr_to_vb_idx(va->va_start);
2598 err = xa_insert(xa, vb_idx, vb, gfp_mask);
2602 return ERR_PTR(err);
2605 vbq = raw_cpu_ptr(&vmap_block_queue);
2606 spin_lock(&vbq->lock);
2607 list_add_tail_rcu(&vb->free_list, &vbq->free);
2608 spin_unlock(&vbq->lock);
2613 static void free_vmap_block(struct vmap_block *vb)
2615 struct vmap_node *vn;
2616 struct vmap_block *tmp;
2619 xa = addr_to_vb_xa(vb->va->va_start);
2620 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2623 vn = addr_to_node(vb->va->va_start);
2624 spin_lock(&vn->busy.lock);
2625 unlink_va(vb->va, &vn->busy.root);
2626 spin_unlock(&vn->busy.lock);
2628 free_vmap_area_noflush(vb->va);
2629 kfree_rcu(vb, rcu_head);
2632 static bool purge_fragmented_block(struct vmap_block *vb,
2633 struct vmap_block_queue *vbq, struct list_head *purge_list,
2636 if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2637 vb->dirty == VMAP_BBMAP_BITS)
2640 /* Don't overeagerly purge usable blocks unless requested */
2641 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2644 /* prevent further allocs after releasing lock */
2645 WRITE_ONCE(vb->free, 0);
2646 /* prevent purging it again */
2647 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2649 vb->dirty_max = VMAP_BBMAP_BITS;
2650 spin_lock(&vbq->lock);
2651 list_del_rcu(&vb->free_list);
2652 spin_unlock(&vbq->lock);
2653 list_add_tail(&vb->purge, purge_list);
2657 static void free_purged_blocks(struct list_head *purge_list)
2659 struct vmap_block *vb, *n_vb;
2661 list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2662 list_del(&vb->purge);
2663 free_vmap_block(vb);
2667 static void purge_fragmented_blocks(int cpu)
2670 struct vmap_block *vb;
2671 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2674 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2675 unsigned long free = READ_ONCE(vb->free);
2676 unsigned long dirty = READ_ONCE(vb->dirty);
2678 if (free + dirty != VMAP_BBMAP_BITS ||
2679 dirty == VMAP_BBMAP_BITS)
2682 spin_lock(&vb->lock);
2683 purge_fragmented_block(vb, vbq, &purge, true);
2684 spin_unlock(&vb->lock);
2687 free_purged_blocks(&purge);
2690 static void purge_fragmented_blocks_allcpus(void)
2694 for_each_possible_cpu(cpu)
2695 purge_fragmented_blocks(cpu);
2698 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2700 struct vmap_block_queue *vbq;
2701 struct vmap_block *vb;
2705 BUG_ON(offset_in_page(size));
2706 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2707 if (WARN_ON(size == 0)) {
2709 * Allocating 0 bytes isn't what the caller wants, since
2710 * get_order(0) returns a bogus result. Just warn and terminate
2715 order = get_order(size);
2718 vbq = raw_cpu_ptr(&vmap_block_queue);
2719 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2720 unsigned long pages_off;
2722 if (READ_ONCE(vb->free) < (1UL << order))
2725 spin_lock(&vb->lock);
2726 if (vb->free < (1UL << order)) {
2727 spin_unlock(&vb->lock);
2731 pages_off = VMAP_BBMAP_BITS - vb->free;
2732 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2733 WRITE_ONCE(vb->free, vb->free - (1UL << order));
2734 bitmap_set(vb->used_map, pages_off, (1UL << order));
2735 if (vb->free == 0) {
2736 spin_lock(&vbq->lock);
2737 list_del_rcu(&vb->free_list);
2738 spin_unlock(&vbq->lock);
2741 spin_unlock(&vb->lock);
2747 /* Allocate new block if nothing was found */
2749 vaddr = new_vmap_block(order, gfp_mask);
2754 static void vb_free(unsigned long addr, unsigned long size)
2756 unsigned long offset;
2758 struct vmap_block *vb;
2761 BUG_ON(offset_in_page(size));
2762 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2764 flush_cache_vunmap(addr, addr + size);
2766 order = get_order(size);
2767 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2769 xa = addr_to_vb_xa(addr);
2770 vb = xa_load(xa, addr_to_vb_idx(addr));
2772 spin_lock(&vb->lock);
2773 bitmap_clear(vb->used_map, offset, (1UL << order));
2774 spin_unlock(&vb->lock);
2776 vunmap_range_noflush(addr, addr + size);
2778 if (debug_pagealloc_enabled_static())
2779 flush_tlb_kernel_range(addr, addr + size);
2781 spin_lock(&vb->lock);
2783 /* Expand the dirty range that has not been TLB-flushed yet */
2784 vb->dirty_min = min(vb->dirty_min, offset);
2785 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2787 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2788 if (vb->dirty == VMAP_BBMAP_BITS) {
2790 spin_unlock(&vb->lock);
2791 free_vmap_block(vb);
2793 spin_unlock(&vb->lock);
2796 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2798 LIST_HEAD(purge_list);
2801 if (unlikely(!vmap_initialized))
2804 mutex_lock(&vmap_purge_lock);
2806 for_each_possible_cpu(cpu) {
2807 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2808 struct vmap_block *vb;
2812 xa_for_each(&vbq->vmap_blocks, idx, vb) {
2813 spin_lock(&vb->lock);
2816 * Try to purge a fragmented block first. If it's
2817 * not purgeable, check whether there is dirty
2818 * space to be flushed.
2820 if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
2821 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2822 unsigned long va_start = vb->va->va_start;
2825 s = va_start + (vb->dirty_min << PAGE_SHIFT);
2826 e = va_start + (vb->dirty_max << PAGE_SHIFT);
2828 start = min(s, start);
2831 /* Prevent this from being flushed again */
2832 vb->dirty_min = VMAP_BBMAP_BITS;
2837 spin_unlock(&vb->lock);
2841 free_purged_blocks(&purge_list);
2843 if (!__purge_vmap_area_lazy(start, end, false) && flush)
2844 flush_tlb_kernel_range(start, end);
2845 mutex_unlock(&vmap_purge_lock);
2849 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2851 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2852 * to amortize TLB flushing overheads. What this means is that any page you
2853 * have now may, in a former life, have been mapped at a kernel virtual
2854 * address by the vmap layer, and so there might be some CPUs with TLB entries
2855 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2857 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2858 * be sure that none of the pages we have control over will have any aliases
2859 * from the vmap layer.
2861 void vm_unmap_aliases(void)
2863 unsigned long start = ULONG_MAX, end = 0;
2866 _vm_unmap_aliases(start, end, flush);
2868 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
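/*
 * Illustrative sketch (hypothetical caller): code that is about to change
 * attributes or permissions of pages that may once have been vmapped can
 * first make sure no stale lazy aliases survive:
 *
 *	vm_unmap_aliases();
 *	// no CPU now holds a TLB entry for an old vmap alias of those
 *	// pages, so a direct-map permission change cannot be bypassed
 *
 * The arch set_memory_*() helpers typically perform this call internally;
 * open-coded users are rare and should know exactly why they need it.
 */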
2871 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2872 * @mem: the pointer returned by vm_map_ram
2873 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2875 void vm_unmap_ram(const void *mem, unsigned int count)
2877 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2878 unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2879 struct vmap_area *va;
2883 BUG_ON(addr < VMALLOC_START);
2884 BUG_ON(addr > VMALLOC_END);
2885 BUG_ON(!PAGE_ALIGNED(addr));
2887 kasan_poison_vmalloc(mem, size);
2889 if (likely(count <= VMAP_MAX_ALLOC)) {
2890 debug_check_no_locks_freed(mem, size);
2891 vb_free(addr, size);
2895 va = find_unlink_vmap_area(addr);
2896 if (WARN_ON_ONCE(!va))
2899 debug_check_no_locks_freed((void *)va->va_start,
2900 (va->va_end - va->va_start));
2901 free_unmap_vmap_area(va);
2903 EXPORT_SYMBOL(vm_unmap_ram);
2906 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2907 * @pages: an array of pointers to the pages to be mapped
2908 * @count: number of pages
2909 * @node: prefer to allocate data structures on this node
2911 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2912 * faster than vmap(). But if you mix long-lived and short-lived
2913 * objects with vm_map_ram(), it can consume a lot of address space through
2914 * fragmentation (especially on a 32-bit machine), and you may eventually see
2915 * allocation failures. Please use this function only for short-lived objects.
2917 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2919 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2921 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2925 if (likely(count <= VMAP_MAX_ALLOC)) {
2926 mem = vb_alloc(size, GFP_KERNEL);
2929 addr = (unsigned long)mem;
2931 struct vmap_area *va;
2932 va = alloc_vmap_area(size, PAGE_SIZE,
2933 VMALLOC_START, VMALLOC_END,
2934 node, GFP_KERNEL, VMAP_RAM);
2938 addr = va->va_start;
2942 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2943 pages, PAGE_SHIFT) < 0) {
2944 vm_unmap_ram(mem, count);
2949 * Mark the pages as accessible, now that they are mapped.
2950 * With hardware tag-based KASAN, marking is skipped for
2951 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
2953 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
2957 EXPORT_SYMBOL(vm_map_ram);
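/*
 * Minimal usage sketch (illustrative only; "pages", "nr" and "src" are
 * caller-provided): map a short-lived scatter of pages, use the linear
 * alias, then tear the mapping down with the same page count:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 *
 * The count passed to vm_unmap_ram() must be exactly the one that was
 * passed to vm_map_ram(); partial unmaps are not supported.
 */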
2959 static struct vm_struct *vmlist __initdata;
2961 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2963 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2964 return vm->page_order;
2970 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2972 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2973 vm->page_order = order;
2980 * vm_area_add_early - add vmap area early during boot
2981 * @vm: vm_struct to add
2983 * This function is used to add a fixed kernel vm area to vmlist before
2984 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2985 * should contain proper values and the other fields should be zero.
2987 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2989 void __init vm_area_add_early(struct vm_struct *vm)
2991 struct vm_struct *tmp, **p;
2993 BUG_ON(vmap_initialized);
2994 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2995 if (tmp->addr >= vm->addr) {
2996 BUG_ON(tmp->addr < vm->addr + vm->size);
2999 BUG_ON(tmp->addr + tmp->size > vm->addr);
3006 * vm_area_register_early - register vmap area early during boot
3007 * @vm: vm_struct to register
3008 * @align: requested alignment
3010 * This function is used to register a kernel vm area before
3011 * vmalloc_init() is called. @vm->size and @vm->flags should contain
3012 * proper values on entry and other fields should be zero. On return,
3013 * vm->addr contains the allocated address.
3015 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3017 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3019 unsigned long addr = ALIGN(VMALLOC_START, align);
3020 struct vm_struct *cur, **p;
3022 BUG_ON(vmap_initialized);
3024 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3025 if ((unsigned long)cur->addr - addr >= vm->size)
3027 addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3030 BUG_ON(addr > VMALLOC_END - vm->size);
3031 vm->addr = (void *)addr;
3034 kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3037 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
3038 struct vmap_area *va, unsigned long flags, const void *caller)
3041 vm->addr = (void *)va->va_start;
3042 vm->size = va->va_end - va->va_start;
3043 vm->caller = caller;
3047 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
3048 unsigned long flags, const void *caller)
3050 struct vmap_node *vn = addr_to_node(va->va_start);
3052 spin_lock(&vn->busy.lock);
3053 setup_vmalloc_vm_locked(vm, va, flags, caller);
3054 spin_unlock(&vn->busy.lock);
3057 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3060 * Before removing VM_UNINITIALIZED,
3061 * we should make sure that vm has proper values.
3062 * Pair with smp_rmb() in show_numa_info().
3065 vm->flags &= ~VM_UNINITIALIZED;
3068 static struct vm_struct *__get_vm_area_node(unsigned long size,
3069 unsigned long align, unsigned long shift, unsigned long flags,
3070 unsigned long start, unsigned long end, int node,
3071 gfp_t gfp_mask, const void *caller)
3073 struct vmap_area *va;
3074 struct vm_struct *area;
3075 unsigned long requested_size = size;
3077 BUG_ON(in_interrupt());
3078 size = ALIGN(size, 1ul << shift);
3079 if (unlikely(!size))
3082 if (flags & VM_IOREMAP)
3083 align = 1ul << clamp_t(int, get_count_order_long(size),
3084 PAGE_SHIFT, IOREMAP_MAX_ORDER);
3086 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3087 if (unlikely(!area))
3090 if (!(flags & VM_NO_GUARD))
3093 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
3099 setup_vmalloc_vm(area, va, flags, caller);
3102 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3103 * best-effort approach, as they can be mapped outside of vmalloc code.
3104 * For VM_ALLOC mappings, the pages are marked as accessible after
3105 * getting mapped in __vmalloc_node_range().
3106 * With hardware tag-based KASAN, marking is skipped for
3107 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3109 if (!(flags & VM_ALLOC))
3110 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3111 KASAN_VMALLOC_PROT_NORMAL);
3116 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3117 unsigned long start, unsigned long end,
3120 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3121 NUMA_NO_NODE, GFP_KERNEL, caller);
3125 * get_vm_area - reserve a contiguous kernel virtual area
3126 * @size: size of the area
3127 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
3129 * Search for an area of @size in the kernel virtual mapping area,
3130 * and reserve it for our purposes. Returns the area descriptor
3131 * on success or %NULL on failure.
3133 * Return: the area descriptor on success or %NULL on failure.
3135 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3137 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3138 VMALLOC_START, VMALLOC_END,
3139 NUMA_NO_NODE, GFP_KERNEL,
3140 __builtin_return_address(0));
3143 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3146 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3147 VMALLOC_START, VMALLOC_END,
3148 NUMA_NO_NODE, GFP_KERNEL, caller);
3152 * find_vm_area - find a contiguous kernel virtual area
3153 * @addr: base address
3155 * Search for the kernel VM area starting at @addr, and return it.
3156 * It is up to the caller to do all required locking to keep the returned pointer valid.
3159 * Return: the area descriptor on success or %NULL on failure.
3161 struct vm_struct *find_vm_area(const void *addr)
3163 struct vmap_area *va;
3165 va = find_vmap_area((unsigned long)addr);
3173 * remove_vm_area - find and remove a contiguous kernel virtual area
3174 * @addr: base address
3176 * Search for the kernel VM area starting at @addr, and remove it.
3177 * This function returns the found VM area, but using it is NOT safe
3178 * on SMP machines, except for its size or flags.
3180 * Return: the area descriptor on success or %NULL on failure.
3182 struct vm_struct *remove_vm_area(const void *addr)
3184 struct vmap_area *va;
3185 struct vm_struct *vm;
3189 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3193 va = find_unlink_vmap_area((unsigned long)addr);
3198 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3199 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3200 kasan_free_module_shadow(vm);
3201 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3203 free_unmap_vmap_area(va);
3207 static inline void set_area_direct_map(const struct vm_struct *area,
3208 int (*set_direct_map)(struct page *page))
3212 /* HUGE_VMALLOC passes small pages to set_direct_map */
3213 for (i = 0; i < area->nr_pages; i++)
3214 if (page_address(area->pages[i]))
3215 set_direct_map(area->pages[i]);
3219 * Flush the vm mapping and reset the direct map.
3221 static void vm_reset_perms(struct vm_struct *area)
3223 unsigned long start = ULONG_MAX, end = 0;
3224 unsigned int page_order = vm_area_page_order(area);
3229 * Find the start and end range of the direct mappings to make sure that
3230 * the vm_unmap_aliases() flush includes the direct map.
3232 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3233 unsigned long addr = (unsigned long)page_address(area->pages[i]);
3236 unsigned long page_size;
3238 page_size = PAGE_SIZE << page_order;
3239 start = min(addr, start);
3240 end = max(addr + page_size, end);
3246 * Set direct map to something invalid so that it won't be cached if
3247 * there are any accesses after the TLB flush, then flush the TLB and
3248 * reset the direct map permissions to the default.
3250 set_area_direct_map(area, set_direct_map_invalid_noflush);
3251 _vm_unmap_aliases(start, end, flush_dmap);
3252 set_area_direct_map(area, set_direct_map_default_noflush);
3255 static void delayed_vfree_work(struct work_struct *w)
3257 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3258 struct llist_node *t, *llnode;
3260 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3265 * vfree_atomic - release memory allocated by vmalloc()
3266 * @addr: memory base address
3268 * This one is just like vfree() but can be called in any atomic context except NMIs.
3271 void vfree_atomic(const void *addr)
3273 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3276 kmemleak_free(addr);
3279 * Use raw_cpu_ptr() because this can be called from preemptible
3280 * context. Preemption is absolutely fine here, because the llist_add()
3281 * implementation is lockless, so it works even if we are adding to
3282 * another cpu's list. schedule_work() should be fine with this too.
3284 if (addr && llist_add((struct llist_node *)addr, &p->list))
3285 schedule_work(&p->wq);
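/*
 * Illustrative sketch ("obj" and its lock are hypothetical): a caller
 * that must drop a vmalloc'ed buffer while it cannot sleep, e.g. under
 * a spinlock with interrupts disabled, defers the real free:
 *
 *	spin_lock_irqsave(&obj->lock, flags);
 *	buf = obj->buf;
 *	obj->buf = NULL;
 *	vfree_atomic(buf);		// never sleeps, NULL is a no-op
 *	spin_unlock_irqrestore(&obj->lock, flags);
 */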
3289 * vfree - Release memory allocated by vmalloc()
3290 * @addr: Memory base address
3292 * Free the virtually contiguous memory area starting at @addr, as obtained
3293 * from one of the vmalloc() family of APIs. This will usually also free the
3294 * physical memory underlying the virtual allocation, but that memory is
3295 * reference counted, so it will not be freed until the last user goes away.
3297 * If @addr is NULL, no operation is performed.
3300 * May sleep if called *not* from interrupt context.
3301 * Must not be called in NMI context (strictly speaking, it could be
3302 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3303 * conventions for vfree() arch-dependent would be a really bad idea).
3305 void vfree(const void *addr)
3307 struct vm_struct *vm;
3310 if (unlikely(in_interrupt())) {
3316 kmemleak_free(addr);
3322 vm = remove_vm_area(addr);
3323 if (unlikely(!vm)) {
3324 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3329 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3331 for (i = 0; i < vm->nr_pages; i++) {
3332 struct page *page = vm->pages[i];
3335 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
3337 * High-order allocs for huge vmallocs are split, so
3338 * the pages can be freed as an array of order-0 allocations
3343 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3347 EXPORT_SYMBOL(vfree);
3350 * vunmap - release virtual mapping obtained by vmap()
3351 * @addr: memory base address
3353 * Free the virtually contiguous memory area starting at @addr,
3354 * which was created from the page array passed to vmap().
3356 * Must not be called in interrupt context.
3358 void vunmap(const void *addr)
3360 struct vm_struct *vm;
3362 BUG_ON(in_interrupt());
3367 vm = remove_vm_area(addr);
3368 if (unlikely(!vm)) {
3369 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3375 EXPORT_SYMBOL(vunmap);
3378 * vmap - map an array of pages into virtually contiguous space
3379 * @pages: array of page pointers
3380 * @count: number of pages to map
3381 * @flags: vm_area->flags
3382 * @prot: page protection for the mapping
3384 * Maps @count pages from @pages into contiguous kernel virtual space.
3385 * If @flags contains %VM_MAP_PUT_PAGES, ownership of the pages array itself
3386 * (which must be kmalloc or vmalloc memory) and one reference per page in it
3387 * are transferred from the caller to vmap(), and will be freed / dropped when
3388 * vfree() is called on the return value.
3390 * Return: the address of the area or %NULL on failure
3392 void *vmap(struct page **pages, unsigned int count,
3393 unsigned long flags, pgprot_t prot)
3395 struct vm_struct *area;
3397 unsigned long size; /* In bytes */
3401 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3405 * Your top guard is someone else's bottom guard. Not having a top
3406 * guard compromises someone else's mappings too.
3408 if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3409 flags &= ~VM_NO_GUARD;
3411 if (count > totalram_pages())
3414 size = (unsigned long)count << PAGE_SHIFT;
3415 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3419 addr = (unsigned long)area->addr;
3420 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3421 pages, PAGE_SHIFT) < 0) {
3426 if (flags & VM_MAP_PUT_PAGES) {
3427 area->pages = pages;
3428 area->nr_pages = count;
3432 EXPORT_SYMBOL(vmap);
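/*
 * Usage sketch (illustrative; error handling trimmed): stitch separately
 * allocated pages into one contiguous kernel mapping. With
 * VM_MAP_PUT_PAGES both the kmalloc'ed page array and one reference per
 * page are handed over, so a single vfree() releases everything:
 *
 *	struct page **pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *
 *	for (i = 0; i < nr; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, nr, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
 *	...
 *	vfree(va);	// drops the page references and frees "pages" too
 */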
3434 #ifdef CONFIG_VMAP_PFN
3435 struct vmap_pfn_data {
3436 unsigned long *pfns;
3441 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3443 struct vmap_pfn_data *data = private;
3444 unsigned long pfn = data->pfns[data->idx];
3447 if (WARN_ON_ONCE(pfn_valid(pfn)))
3450 ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3451 set_pte_at(&init_mm, addr, pte, ptent);
3458 * vmap_pfn - map an array of PFNs into virtually contiguous space
3459 * @pfns: array of PFNs
3460 * @count: number of pages to map
3461 * @prot: page protection for the mapping
3463 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3464 * the start address of the mapping.
3466 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3468 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3469 struct vm_struct *area;
3471 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3472 __builtin_return_address(0));
3475 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3476 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3481 flush_cache_vmap((unsigned long)area->addr,
3482 (unsigned long)area->addr + count * PAGE_SIZE);
3486 EXPORT_SYMBOL_GPL(vmap_pfn);
3487 #endif /* CONFIG_VMAP_PFN */
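/*
 * Illustrative sketch for vmap_pfn() ("dev_phys_base" and "nr" are
 * hypothetical): map a run of device PFNs that have no struct page,
 * e.g. PCI BAR backed memory, write-combined:
 *
 *	unsigned long *pfns = kcalloc(nr, sizeof(*pfns), GFP_KERNEL);
 *
 *	for (i = 0; i < nr; i++)
 *		pfns[i] = (dev_phys_base >> PAGE_SHIFT) + i;
 *	va = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *	kfree(pfns);	// vmap_pfn() does not keep the array
 *	...
 *	vunmap(va);
 *
 * Note the WARN_ON_ONCE(pfn_valid(pfn)) above: this interface is only
 * meant for PFNs that are not backed by struct pages.
 */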
3489 static inline unsigned int
3490 vm_area_alloc_pages(gfp_t gfp, int nid,
3491 unsigned int order, unsigned int nr_pages, struct page **pages)
3493 unsigned int nr_allocated = 0;
3494 gfp_t alloc_gfp = gfp;
3495 bool nofail = false;
3500 * For order-0 pages we make use of the bulk allocator. If
3501 * the page array ends up only partly populated (or not at all) due
3502 * to failures, fall back to the single-page allocator, which is more permissive.
3506 /* bulk allocator doesn't support nofail req. officially */
3507 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
3509 while (nr_allocated < nr_pages) {
3510 unsigned int nr, nr_pages_request;
3513 * The maximum allowed request is hard-coded at 100
3514 * pages per call. That is done in order to prevent
3515 * long preemption-off periods in the bulk allocator,
3516 * so the range is [1:100].
3518 nr_pages_request = min(100U, nr_pages - nr_allocated);
3520 /* Memory allocation should honour the mempolicy; we can't
3521  * blindly use the nearest node when nid == NUMA_NO_NODE,
3522  * otherwise memory may be allocated on only one node
3523  * while the mempolicy wants the memory interleaved.
3525 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3526 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
3528 pages + nr_allocated);
3531 nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
3533 pages + nr_allocated);
3539 * If zero or pages were obtained partly,
3540 * fallback to a single page allocator.
3542 if (nr != nr_pages_request)
3545 } else if (gfp & __GFP_NOFAIL) {
3547 * Higher order nofail allocations are really expensive and
3548 * potentially dangerous (premature OOM, disruptive reclaim
3549 * and compaction, etc.).
3551 alloc_gfp &= ~__GFP_NOFAIL;
3555 /* High-order pages or fallback path if "bulk" fails. */
3556 while (nr_allocated < nr_pages) {
3557 if (fatal_signal_pending(current))
3560 if (nid == NUMA_NO_NODE)
3561 page = alloc_pages(alloc_gfp, order);
3563 page = alloc_pages_node(nid, alloc_gfp, order);
3564 if (unlikely(!page)) {
3568 /* fall back to the zero order allocations */
3569 alloc_gfp |= __GFP_NOFAIL;
3575 * Higher order allocations must be able to be treated as
3576 * independent small pages by callers (as they can with
3577 * small-page vmallocs). Some drivers do their own refcounting
3578 * on vmalloc_to_page() pages, some use page->mapping,
3582 split_page(page, order);
3585 * Careful, we allocate and map page-order pages, but
3586 * tracking is done per PAGE_SIZE page so as to keep the
3587 * vm_struct APIs independent of the physical/mapped size.
3589 for (i = 0; i < (1U << order); i++)
3590 pages[nr_allocated + i] = page + i;
3593 nr_allocated += 1U << order;
3596 return nr_allocated;
3599 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3600 pgprot_t prot, unsigned int page_shift,
3603 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3604 bool nofail = gfp_mask & __GFP_NOFAIL;
3605 unsigned long addr = (unsigned long)area->addr;
3606 unsigned long size = get_vm_area_size(area);
3607 unsigned long array_size;
3608 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3609 unsigned int page_order;
3613 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3615 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3616 gfp_mask |= __GFP_HIGHMEM;
3618 /* Please note that the recursion is strictly bounded. */
3619 if (array_size > PAGE_SIZE) {
3620 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
3623 area->pages = kmalloc_node(array_size, nested_gfp, node);
3627 warn_alloc(gfp_mask, NULL,
3628 "vmalloc error: size %lu, failed to allocate page array size %lu",
3629 nr_small_pages * PAGE_SIZE, array_size);
3634 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3635 page_order = vm_area_page_order(area);
3637 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
3638 node, page_order, nr_small_pages, area->pages);
3640 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3641 if (gfp_mask & __GFP_ACCOUNT) {
3644 for (i = 0; i < area->nr_pages; i++)
3645 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3649 * If not enough pages were obtained to satisfy the
3650 * allocation request, free whatever was allocated via vfree().
3652 if (area->nr_pages != nr_small_pages) {
3654 * vm_area_alloc_pages() can fail due to insufficient memory, but also due to:
3657 * - a pending fatal signal
3658 * - insufficient huge page-order pages
3660 * Since we always retry allocations at order-0 in the huge page
3661 * case, a warning for either is spurious.
3663 if (!fatal_signal_pending(current) && page_order == 0)
3664 warn_alloc(gfp_mask, NULL,
3665 "vmalloc error: size %lu, failed to allocate pages",
3666 area->nr_pages * PAGE_SIZE);
3671 * Page table allocations ignore the external gfp mask; enforce it via the memalloc scope API.
3674 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3675 flags = memalloc_nofs_save();
3676 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3677 flags = memalloc_noio_save();
3680 ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3682 if (nofail && (ret < 0))
3683 schedule_timeout_uninterruptible(1);
3684 } while (nofail && (ret < 0));
3686 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3687 memalloc_nofs_restore(flags);
3688 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3689 memalloc_noio_restore(flags);
3692 warn_alloc(gfp_mask, NULL,
3693 "vmalloc error: size %lu, failed to map pages",
3694 area->nr_pages * PAGE_SIZE);
3706 * __vmalloc_node_range - allocate virtually contiguous memory
3707 * @size: allocation size
3708 * @align: desired alignment
3709 * @start: vm area range start
3710 * @end: vm area range end
3711 * @gfp_mask: flags for the page level allocator
3712 * @prot: protection mask for the allocated pages
3713 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3714 * @node: node to use for allocation or NUMA_NO_NODE
3715 * @caller: caller's return address
3717 * Allocate enough pages to cover @size from the page level
3718 * allocator with @gfp_mask flags. Please note that the full set of gfp
3719 * flags is not supported; GFP_KERNEL, GFP_NOFS and GFP_NOIO are all supported.
3721 * Zone modifiers are not supported. From the reclaim modifiers
3722 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3723 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3724 * __GFP_RETRY_MAYFAIL are not supported).
3726 * __GFP_NOWARN can be used to suppress failure messages.
3728 * Map them into contiguous kernel virtual space, using a pagetable
3729 * protection of @prot.
3731 * Return: the address of the area or %NULL on failure
3733 void *__vmalloc_node_range(unsigned long size, unsigned long align,
3734 unsigned long start, unsigned long end, gfp_t gfp_mask,
3735 pgprot_t prot, unsigned long vm_flags, int node,
3738 struct vm_struct *area;
3740 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3741 unsigned long real_size = size;
3742 unsigned long real_align = align;
3743 unsigned int shift = PAGE_SHIFT;
3745 if (WARN_ON_ONCE(!size))
3748 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3749 warn_alloc(gfp_mask, NULL,
3750 "vmalloc error: size %lu, exceeds total pages",
3755 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3756 unsigned long size_per_node;
3759 * Try huge pages. Only try for PAGE_KERNEL allocations,
3760 * others like modules don't yet expect huge pages in
3761 * their allocations, because apply_to_page_range() does not support them.
3765 size_per_node = size;
3766 if (node == NUMA_NO_NODE)
3767 size_per_node /= num_online_nodes();
3768 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
3771 shift = arch_vmap_pte_supported_shift(size_per_node);
3773 align = max(real_align, 1UL << shift);
3774 size = ALIGN(real_size, 1UL << shift);
3778 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3779 VM_UNINITIALIZED | vm_flags, start, end, node,
3782 bool nofail = gfp_mask & __GFP_NOFAIL;
3783 warn_alloc(gfp_mask, NULL,
3784 "vmalloc error: size %lu, vm_struct allocation failed%s",
3785 real_size, (nofail) ? ". Retrying." : "");
3787 schedule_timeout_uninterruptible(1);
3794 * Prepare arguments for __vmalloc_area_node() and
3795 * kasan_unpoison_vmalloc().
3797 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3798 if (kasan_hw_tags_enabled()) {
3800 * Modify protection bits to allow tagging.
3801 * This must be done before mapping.
3803 prot = arch_vmap_pgprot_tagged(prot);
3806 * Skip page_alloc poisoning and zeroing for physical
3807 * pages backing VM_ALLOC mapping. Memory is instead
3808 * poisoned and zeroed by kasan_unpoison_vmalloc().
3810 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3813 /* Take note that the mapping is PAGE_KERNEL. */
3814 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3817 /* Allocate physical pages and map them into vmalloc space. */
3818 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3823 * Mark the pages as accessible, now that they are mapped.
3824 * The condition for setting KASAN_VMALLOC_INIT should complement the
3825 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3826 * to make sure that memory is initialized under the same conditions.
3827 * Tag-based KASAN modes only assign tags to normal non-executable
3828 * allocations, see __kasan_unpoison_vmalloc().
3830 kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3831 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3832 (gfp_mask & __GFP_SKIP_ZERO))
3833 kasan_flags |= KASAN_VMALLOC_INIT;
3834 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3835 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3838 * In this function, a newly allocated vm_struct has the VM_UNINITIALIZED
3839 * flag set, which means the vm_struct is not fully initialized.
3840 * Now that it is fully initialized, remove the flag here.
3842 clear_vm_uninitialized_flag(area);
3844 size = PAGE_ALIGN(size);
3845 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3846 kmemleak_vmalloc(area, size, gfp_mask);
3851 if (shift > PAGE_SHIFT) {
3862 * __vmalloc_node - allocate virtually contiguous memory
3863 * @size: allocation size
3864 * @align: desired alignment
3865 * @gfp_mask: flags for the page level allocator
3866 * @node: node to use for allocation or NUMA_NO_NODE
3867 * @caller: caller's return address
3869 * Allocate enough pages to cover @size from the page level allocator with
3870 * @gfp_mask flags. Map them into contiguous kernel virtual space.
3872 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3873 * and __GFP_NOFAIL are not supported.
3875 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the mm people.
3878 * Return: pointer to the allocated memory or %NULL on error
3880 void *__vmalloc_node(unsigned long size, unsigned long align,
3881 gfp_t gfp_mask, int node, const void *caller)
3883 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3884 gfp_mask, PAGE_KERNEL, 0, node, caller);
3887 * This is only for performance analysis of vmalloc, and for stress purposes.
3888 * It is required by the vmalloc test module; therefore do not use it elsewhere.
3891 #ifdef CONFIG_TEST_VMALLOC_MODULE
3892 EXPORT_SYMBOL_GPL(__vmalloc_node);
3895 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3897 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3898 __builtin_return_address(0));
3900 EXPORT_SYMBOL(__vmalloc);
3903 * vmalloc - allocate virtually contiguous memory
3904 * @size: allocation size
3906 * Allocate enough pages to cover @size from the page level
3907 * allocator and map them into contiguous kernel virtual space.
3909 * For tight control over page level allocator and protection flags
3910 * use __vmalloc() instead.
3912 * Return: pointer to the allocated memory or %NULL on error
3914 void *vmalloc(unsigned long size)
3916 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3917 __builtin_return_address(0));
3919 EXPORT_SYMBOL(vmalloc);
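/*
 * Minimal sketch of the common pattern (illustrative; "nent" is
 * caller-provided): allocations that may be too large or too fragmented
 * for kmalloc() go through vmalloc() and are always paired with vfree():
 *
 *	unsigned int *tbl = vmalloc(array_size(nent, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */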
3922 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3923 * @size: allocation size
3924 * @gfp_mask: flags for the page level allocator
3926 * Allocate enough pages to cover @size from the page level
3927 * allocator and map them into contiguous kernel virtual space.
3928 * If @size is greater than or equal to PMD_SIZE, allow using
3929 * huge pages for the memory
3931 * Return: pointer to the allocated memory or %NULL on error
3933 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
3935 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3936 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3937 NUMA_NO_NODE, __builtin_return_address(0));
3939 EXPORT_SYMBOL_GPL(vmalloc_huge);
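/*
 * Illustrative sketch ("nbuckets" and "struct bucket" are hypothetical):
 * a large, long-lived table can opt in to huge mappings once its size
 * reaches PMD_SIZE:
 *
 *	tbl = vmalloc_huge(nbuckets * sizeof(struct bucket), GFP_KERNEL);
 *
 * The result is freed with vfree() as usual; whether huge mappings are
 * really used also depends on CONFIG_HAVE_ARCH_HUGE_VMALLOC and the
 * "nohugevmalloc" boot parameter.
 */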
3942 * vzalloc - allocate virtually contiguous memory with zero fill
3943 * @size: allocation size
3945 * Allocate enough pages to cover @size from the page level
3946 * allocator and map them into contiguous kernel virtual space.
3947 * The memory allocated is set to zero.
3949 * For tight control over page level allocator and protection flags
3950 * use __vmalloc() instead.
3952 * Return: pointer to the allocated memory or %NULL on error
3954 void *vzalloc(unsigned long size)
3956 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3957 __builtin_return_address(0));
3959 EXPORT_SYMBOL(vzalloc);
3962 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3963 * @size: allocation size
3965 * The resulting memory area is zeroed so it can be mapped to userspace
3966 * without leaking data.
3968 * Return: pointer to the allocated memory or %NULL on error
3970 void *vmalloc_user(unsigned long size)
3972 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3973 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3974 VM_USERMAP, NUMA_NO_NODE,
3975 __builtin_return_address(0));
3977 EXPORT_SYMBOL(vmalloc_user);
3980 * vmalloc_node - allocate memory on a specific node
3981 * @size: allocation size
3984 * Allocate enough pages to cover @size from the page level
3985 * allocator and map them into contiguous kernel virtual space.
3987 * For tight control over page level allocator and protection flags
3988 * use __vmalloc() instead.
3990 * Return: pointer to the allocated memory or %NULL on error
3992 void *vmalloc_node(unsigned long size, int node)
3994 return __vmalloc_node(size, 1, GFP_KERNEL, node,
3995 __builtin_return_address(0));
3997 EXPORT_SYMBOL(vmalloc_node);
4000 * vzalloc_node - allocate memory on a specific node with zero fill
4001 * @size: allocation size
4004 * Allocate enough pages to cover @size from the page level
4005 * allocator and map them into contiguous kernel virtual space.
4006 * The memory allocated is set to zero.
4008 * Return: pointer to the allocated memory or %NULL on error
4010 void *vzalloc_node(unsigned long size, int node)
4012 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
4013 __builtin_return_address(0));
4015 EXPORT_SYMBOL(vzalloc_node);
4017 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4018 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4019 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4020 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4023 * 64b systems should always have either DMA or DMA32 zones. For others
4024 * GFP_DMA32 should do the right thing and use the normal zone.
4026 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4030 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4031 * @size: allocation size
4033 * Allocate enough 32bit PA addressable pages to cover @size from the
4034 * page level allocator and map them into contiguous kernel virtual space.
4036 * Return: pointer to the allocated memory or %NULL on error
4038 void *vmalloc_32(unsigned long size)
4040 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4041 __builtin_return_address(0));
4043 EXPORT_SYMBOL(vmalloc_32);
4046 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4047 * @size: allocation size
4049 * The resulting memory area is 32bit addressable and zeroed so it can be
4050 * mapped to userspace without leaking data.
4052 * Return: pointer to the allocated memory or %NULL on error
4054 void *vmalloc_32_user(unsigned long size)
4056 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
4057 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4058 VM_USERMAP, NUMA_NO_NODE,
4059 __builtin_return_address(0));
4061 EXPORT_SYMBOL(vmalloc_32_user);
4064 * Atomically zero bytes in the iterator.
4066 * Returns the number of zeroed bytes.
4068 static size_t zero_iter(struct iov_iter *iter, size_t count)
4070 size_t remains = count;
4072 while (remains > 0) {
4075 num = min_t(size_t, remains, PAGE_SIZE);
4076 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4083 return count - remains;
4087 * Small helper routine: copy contents from addr to the iterator.
4088 * If a page is not present, zero-fill the corresponding bytes.
4090 * Returns the number of copied bytes.
4092 static size_t aligned_vread_iter(struct iov_iter *iter,
4093 const char *addr, size_t count)
4095 size_t remains = count;
4098 while (remains > 0) {
4099 unsigned long offset, length;
4102 offset = offset_in_page(addr);
4103 length = PAGE_SIZE - offset;
4104 if (length > remains)
4106 page = vmalloc_to_page(addr);
4108 * Safe access to this _mapped_ area would require a lock. But
4109 * taking a lock here would add overhead to every
4110 * vmalloc()/vfree() call for a _debug_ interface that is rarely
4111 * used. Instead, we use a local mapping via
4112 * copy_page_to_iter_nofault() and accept a small overhead in
4113 * this access function.
4116 copied = copy_page_to_iter_nofault(page, offset,
4119 copied = zero_iter(iter, length);
4124 if (copied != length)
4128 return count - remains;
4132 * Read from a vm_map_ram region of memory.
4134 * Returns the number of copied bytes.
4136 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4137 size_t count, unsigned long flags)
4140 struct vmap_block *vb;
4142 unsigned long offset;
4143 unsigned int rs, re;
4147 * If the area was created directly by the vm_map_ram() interface,
4148 * without being subdivided and delegated to vmap_blocks, handle it here.
4151 if (!(flags & VMAP_BLOCK))
4152 return aligned_vread_iter(iter, addr, count);
4157 * The area is split into regions and tracked with vmap_block; read out
4158 * each region and zero-fill the holes between regions.
4160 xa = addr_to_vb_xa((unsigned long) addr);
4161 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4165 spin_lock(&vb->lock);
4166 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4167 spin_unlock(&vb->lock);
4171 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4177 start = vmap_block_vaddr(vb->va->va_start, rs);
4180 size_t to_zero = min_t(size_t, start - addr, remains);
4181 size_t zeroed = zero_iter(iter, to_zero);
4186 if (remains == 0 || zeroed != to_zero)
4190 /* it could start reading from the middle of a used region */
4191 offset = offset_in_page(addr);
4192 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4196 copied = aligned_vread_iter(iter, start + offset, n);
4205 spin_unlock(&vb->lock);
4208 /* zero-fill the remaining dirty or free regions */
4209 return count - remains + zero_iter(iter, remains);
4211 /* We couldn't copy/zero everything */
4212 spin_unlock(&vb->lock);
4213 return count - remains;
4217 * vread_iter() - read vmalloc area in a safe way to an iterator.
4218 * @iter: the iterator to which data should be written.
4219 * @addr: vm address.
4220 * @count: number of bytes to be read.
4222 * This function checks that addr is a valid vmalloc'ed area and
4223 * copies data from that area to the given iterator. If the given memory range
4224 * of [addr...addr+count) includes some valid address, data is copied to
4225 * the proper area of @iter. If there are memory holes, they'll be zero-filled.
4226 * An IOREMAP area is treated as a memory hole and no copy is done.
4228 * If [addr...addr+count) doesn't include any intersection with a live
4229 * vm_struct area, 0 is returned. @iter should wrap a kernel buffer.
4231 * Note: In usual ops, vread_iter() is never necessary because the caller
4232 *	should know the vmalloc() area is valid and can use memcpy().
4233 *	This is for routines which have to access the vmalloc area without
4234 *	any information, such as /proc/kcore.
4236 * Return: number of bytes for which addr and the iterator should be advanced
4237 *	(same number as @count) or %0 if [addr...addr+count) doesn't
4238 *	include any intersection with a valid vmalloc area
4240 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4242 struct vmap_node *vn;
4243 struct vmap_area *va;
4244 struct vm_struct *vm;
4246 size_t n, size, flags, remains;
4249 addr = kasan_reset_tag(addr);
4251 /* Don't allow overflow */
4252 if ((unsigned long) addr + count < count)
4253 count = -(unsigned long) addr;
4257 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4261 /* no intersects with alive vmap_area */
4262 if ((unsigned long)addr + remains <= va->va_start)
4272 flags = va->flags & VMAP_FLAGS_MASK;
4274 * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it needs
4275 * to be set together with VMAP_RAM.
4277 WARN_ON(flags == VMAP_BLOCK);
4282 if (vm && (vm->flags & VM_UNINITIALIZED))
4285 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4288 vaddr = (char *) va->va_start;
4289 size = vm ? get_vm_area_size(vm) : va_size(va);
4291 if (addr >= vaddr + size)
4295 size_t to_zero = min_t(size_t, vaddr - addr, remains);
4296 size_t zeroed = zero_iter(iter, to_zero);
4301 if (remains == 0 || zeroed != to_zero)
4305 n = vaddr + size - addr;
4309 if (flags & VMAP_RAM)
4310 copied = vmap_ram_vread_iter(iter, addr, n, flags);
4311 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4312 copied = aligned_vread_iter(iter, addr, n);
4313 else /* IOREMAP | SPARSE area is treated as memory hole */
4314 copied = zero_iter(iter, n);
4324 spin_unlock(&vn->busy.lock);
4325 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4329 spin_unlock(&vn->busy.lock);
4331 /* zero-fill memory holes */
4332 return count - remains + zero_iter(iter, remains);
4334 /* Nothing remains, or we couldn't copy/zero everything. */
4336 spin_unlock(&vn->busy.lock);
4338 return count - remains;
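/*
 * Illustrative sketch of how a /proc/kcore style reader might drive
 * vread_iter() ("kbuf", "len" and "vaddr" are hypothetical): wrap a
 * kernel buffer in an iov_iter and let holes come back zero-filled:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
 *	copied = vread_iter(&iter, vaddr, len);
 */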
4342 * remap_vmalloc_range_partial - map vmalloc pages to userspace
4343 * @vma: vma to cover
4344 * @uaddr: target user address to start at
4345 * @kaddr: virtual address of vmalloc kernel memory
4346 * @pgoff: offset from @kaddr to start at
4347 * @size: size of map area
4349 * Returns: 0 for success, -Exxx on failure
4351 * This function checks that @kaddr is a valid vmalloc'ed area,
4352 * and that it is big enough to cover the range starting at
4353 * @uaddr in @vma. It will return failure if those criteria aren't met.
4356 * Similar to remap_pfn_range() (see mm/memory.c)
4358 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4359 void *kaddr, unsigned long pgoff,
4362 struct vm_struct *area;
4364 unsigned long end_index;
4366 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4369 size = PAGE_ALIGN(size);
4371 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4374 area = find_vm_area(kaddr);
4378 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4381 if (check_add_overflow(size, off, &end_index) ||
4382 end_index > get_vm_area_size(area))
4387 struct page *page = vmalloc_to_page(kaddr);
4390 ret = vm_insert_page(vma, uaddr, page);
4399 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4405 * remap_vmalloc_range - map vmalloc pages to userspace
4406 * @vma: vma to cover (map full range of vma)
4407 * @addr: vmalloc memory
4408 * @pgoff: number of pages into addr before first page to map
4410 * Returns: 0 for success, -Exxx on failure
4412 * This function checks that addr is a valid vmalloc'ed area, and
4413 * that it is big enough to cover the vma. It will return failure if
4414 * those criteria aren't met.
4416 * Similar to remap_pfn_range() (see mm/memory.c)
4418 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4419 unsigned long pgoff)
4421 return remap_vmalloc_range_partial(vma, vma->vm_start,
4423 vma->vm_end - vma->vm_start);
4425 EXPORT_SYMBOL(remap_vmalloc_range);
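/*
 * Illustrative sketch: the usual pairing is a VM_USERMAP allocation made
 * with vmalloc_user() and an ->mmap() handler that exposes it ("buf",
 * "buf_size" and "example_mmap" are hypothetical):
 *
 *	buf = vmalloc_user(buf_size);	// zeroed, flagged VM_USERMAP
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 *
 * The allocation must carry VM_USERMAP (or VM_DMA_COHERENT) or the
 * remap will fail with -EINVAL.
 */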
4427 void free_vm_area(struct vm_struct *area)
4429 struct vm_struct *ret;
4430 ret = remove_vm_area(area->addr);
4431 BUG_ON(ret != area);
4434 EXPORT_SYMBOL_GPL(free_vm_area);
4437 static struct vmap_area *node_to_va(struct rb_node *n)
4439 return rb_entry_safe(n, struct vmap_area, rb_node);
4443 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4444 * @addr: target address
4446 * Returns: the vmap_area if it is found. If there is no such area,
4447 *   the first highest (in reverse order) vmap_area is returned,
4448 *   i.e. va->va_start < addr && va->va_end < addr, or NULL
4449 *   if there are no areas at all before @addr.
4451 static struct vmap_area *
4452 pvm_find_va_enclose_addr(unsigned long addr)
4454 struct vmap_area *va, *tmp;
4457 n = free_vmap_area_root.rb_node;
4461 tmp = rb_entry(n, struct vmap_area, rb_node);
4462 if (tmp->va_start <= addr) {
4464 if (tmp->va_end >= addr)
4477 * pvm_determine_end_from_reverse - find the highest aligned address
4478 * of free block below VMALLOC_END
4480 * @va: in - the VA we start the search from (reverse order);
4481 *      out - the VA with the highest aligned end address.
4482 * @align: alignment for required highest address
4484 * Returns: determined end address within vmap_area
4486 static unsigned long
4487 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4489 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4493 list_for_each_entry_from_reverse((*va),
4494 &free_vmap_area_list, list) {
4495 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4496 if ((*va)->va_start < addr)
4505 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4506 * @offsets: array containing offset of each area
4507 * @sizes: array containing size of each area
4508 * @nr_vms: the number of areas to allocate
4509 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4511 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4512 * vm_structs on success, %NULL on failure
4514 * Percpu allocator wants to use congruent vm areas so that it can
4515 * maintain the offsets among percpu areas. This function allocates
4516 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4517 * be scattered pretty far, distance between two areas easily going up
4518 * to gigabytes. To avoid interacting with regular vmallocs, these
4519 * areas are allocated from top.
4521 * Despite its complicated look, this allocator is rather simple. It
4522 * does everything top-down and scans free blocks from the end looking
4523 * for a matching base. While scanning, if any of the areas does not fit, the
4524 * base address is pulled down to fit that area. Scanning is repeated till
4525 * all the areas fit and then all necessary data structures are inserted
4526 * and the result is returned.
4528 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4529 const size_t *sizes, int nr_vms,
4532 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4533 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4534 struct vmap_area **vas, *va;
4535 struct vm_struct **vms;
4536 int area, area2, last_area, term_area;
4537 unsigned long base, start, size, end, last_end, orig_start, orig_end;
4538 bool purged = false;
4540 /* verify parameters and allocate data structures */
4541 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4542 for (last_area = 0, area = 0; area < nr_vms; area++) {
4543 start = offsets[area];
4544 end = start + sizes[area];
4546 /* is everything aligned properly? */
4547 BUG_ON(!IS_ALIGNED(offsets[area], align));
4548 BUG_ON(!IS_ALIGNED(sizes[area], align));
4550 /* detect the area with the highest address */
4551 if (start > offsets[last_area])
4554 for (area2 = area + 1; area2 < nr_vms; area2++) {
4555 unsigned long start2 = offsets[area2];
4556 unsigned long end2 = start2 + sizes[area2];
4558 BUG_ON(start2 < end && start < end2);
4561 last_end = offsets[last_area] + sizes[last_area];
4563 if (vmalloc_end - vmalloc_start < last_end) {
4568 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4569 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4573 for (area = 0; area < nr_vms; area++) {
4574 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4575 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4576 if (!vas[area] || !vms[area])
4580 spin_lock(&free_vmap_area_lock);
4582 /* start scanning - we scan from the top, begin with the last area */
4583 area = term_area = last_area;
4584 start = offsets[area];
4585 end = start + sizes[area];
4587 va = pvm_find_va_enclose_addr(vmalloc_end);
4588 base = pvm_determine_end_from_reverse(&va, align) - end;
4592 * base might have underflowed, add last_end before comparing.
4595 if (base + last_end < vmalloc_start + last_end)
4599 * Fitting base has not been found.
4605 * If required width exceeds current VA block, move
4606 * base downwards and then recheck.
4608 if (base + end > va->va_end) {
4609 base = pvm_determine_end_from_reverse(&va, align) - end;
4615 * If this VA does not fit, move base downwards and recheck.
4617 if (base + start < va->va_start) {
4618 va = node_to_va(rb_prev(&va->rb_node));
4619 base = pvm_determine_end_from_reverse(&va, align) - end;
4625 * This area fits, move on to the previous one. If
4626 * the previous one is the terminal one, we're done.
4627 */
4628 area = (area + nr_vms - 1) % nr_vms;
4629 if (area == term_area)
4630 break;
4632 start = offsets[area];
4633 end = start + sizes[area];
4634 va = pvm_find_va_enclose_addr(base + end);
4635 }
4637 /* we've found a fitting base, insert all va's */
4638 for (area = 0; area < nr_vms; area++) {
4639 int ret;
4641 start = base + offsets[area];
4642 size = sizes[area];
4644 va = pvm_find_va_enclose_addr(start);
4645 if (WARN_ON_ONCE(va == NULL))
4646 /* It is a BUG(), but trigger recovery instead. */
4647 goto recovery;
4649 ret = va_clip(&free_vmap_area_root,
4650 &free_vmap_area_list, va, start, size);
4651 if (WARN_ON_ONCE(unlikely(ret)))
4652 /* It is a BUG(), but trigger recovery instead. */
4653 goto recovery;
4655 /* Allocated area. */
4656 va = vas[area];
4657 va->va_start = start;
4658 va->va_end = start + size;
4659 }
4661 spin_unlock(&free_vmap_area_lock);
4663 /* populate the kasan shadow space */
4664 for (area = 0; area < nr_vms; area++) {
4665 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4666 goto err_free_shadow;
4667 }
4669 /* insert all vm's */
4670 for (area = 0; area < nr_vms; area++) {
4671 struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4673 spin_lock(&vn->busy.lock);
4674 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4675 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
4676 pcpu_get_vm_areas);
4677 spin_unlock(&vn->busy.lock);
4678 }
4680 /*
4681 * Mark allocated areas as accessible. Do it now as a best-effort
4682 * approach, as they can be mapped outside of vmalloc code.
4683 * With hardware tag-based KASAN, marking is skipped for
4684 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4685 */
4686 for (area = 0; area < nr_vms; area++)
4687 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4688 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4690 kfree(vas);
4691 return vms;
4693 recovery:
4694 /*
4695 * Remove previously allocated areas. There is no
4696 * need to remove these areas from the busy tree,
4697 * because they are inserted only on the final step
4698 * and only when pcpu_get_vm_areas() succeeds.
4699 */
4700 while (area--) {
4701 orig_start = vas[area]->va_start;
4702 orig_end = vas[area]->va_end;
4703 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4704 &free_vmap_area_list);
4705 if (va)
4706 kasan_release_vmalloc(orig_start, orig_end,
4707 va->va_start, va->va_end);
4708 vas[area] = NULL;
4709 }
4711 overflow:
4712 spin_unlock(&free_vmap_area_lock);
4713 if (!purged) {
4714 reclaim_and_purge_vmap_areas();
4715 purged = true;
4717 /* Before "retry", check if we recover. */
4718 for (area = 0; area < nr_vms; area++) {
4719 if (vas[area])
4720 continue;
4722 vas[area] = kmem_cache_zalloc(
4723 vmap_area_cachep, GFP_KERNEL);
4724 if (!vas[area])
4725 goto overflow;
4726 }
4728 goto retry;
4729 }
4731 err_free:
4732 for (area = 0; area < nr_vms; area++) {
4733 if (vas[area])
4734 kmem_cache_free(vmap_area_cachep, vas[area]);
4736 kfree(vms[area]);
4737 }
4738 err_free2:
4739 kfree(vas);
4740 kfree(vms);
4741 return NULL;
4743 err_free_shadow:
4744 spin_lock(&free_vmap_area_lock);
4746 * We release all the vmalloc shadows, even the ones for regions that
4747 * hadn't been successfully added. This relies on kasan_release_vmalloc
4748 * being able to tolerate this case.
4749 */
4750 for (area = 0; area < nr_vms; area++) {
4751 orig_start = vas[area]->va_start;
4752 orig_end = vas[area]->va_end;
4753 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4754 &free_vmap_area_list);
4755 if (va)
4756 kasan_release_vmalloc(orig_start, orig_end,
4757 va->va_start, va->va_end);
4758 vas[area] = NULL;
4759 kfree(vms[area]);
4760 }
4761 spin_unlock(&free_vmap_area_lock);
4762 kfree(vas);
4763 kfree(vms);
4764 return NULL;
4765 }
4767 /**
4768 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4769 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4770 * @nr_vms: the number of allocated areas
4772 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4773 */
4774 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4775 {
4776 int i;
4778 for (i = 0; i < nr_vms; i++)
4779 free_vm_area(vms[i]);
4780 kfree(vms);
4781 }
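/*
 * Illustrative usage sketch (editor's addition, not from the original
 * source): a caller such as the percpu first-chunk setup could pair
 * pcpu_get_vm_areas() and pcpu_free_vm_areas() roughly as below. The two
 * offsets and sizes are hypothetical values chosen only to show the
 * calling convention; both arrays must be aligned to the align argument.
 *
 *	static const unsigned long offs[] = { 0, 4 * (1UL << 20) };
 *	static const size_t sizes[]       = { 8 * PAGE_SIZE, 8 * PAGE_SIZE };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offs, sizes, ARRAY_SIZE(offs), PAGE_SIZE);
 *	if (!vms)
 *		return -ENOMEM;
 *	map pages into vms[0]->addr and vms[1]->addr here ...
 *	pcpu_free_vm_areas(vms, ARRAY_SIZE(offs));
 */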
4782 #endif /* CONFIG_SMP */
4784 #ifdef CONFIG_PRINTK
4785 bool vmalloc_dump_obj(void *object)
4786 {
4787 const void *caller;
4788 struct vm_struct *vm;
4789 struct vmap_area *va;
4790 struct vmap_node *vn;
4791 unsigned long addr;
4792 unsigned int nr_pages;
4794 addr = PAGE_ALIGN((unsigned long) object);
4795 vn = addr_to_node(addr);
4797 if (!spin_trylock(&vn->busy.lock))
4798 return false;
4800 va = __find_vmap_area(addr, &vn->busy.root);
4801 if (!va || !va->vm) {
4802 spin_unlock(&vn->busy.lock);
4803 return false;
4804 }
4806 vm = va->vm;
4807 addr = (unsigned long) vm->addr;
4808 caller = vm->caller;
4809 nr_pages = vm->nr_pages;
4810 spin_unlock(&vn->busy.lock);
4812 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4813 nr_pages, addr, caller);
4815 return true;
4816 }
4817 #endif
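/*
 * For reference (editor's addition): the pr_cont() above extends an
 * existing mem_dump_obj() line, producing output along the lines of the
 * hypothetical example below; the region size, address and caller are
 * made up for illustration.
 *
 *    12-page vmalloc region starting at 0xffffc90000305000 allocated at n_tty_open+0x20/0xd0
 */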
4819 #ifdef CONFIG_PROC_FS
4820 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4821 {
4822 if (IS_ENABLED(CONFIG_NUMA)) {
4823 unsigned int nr, *counters = m->private;
4824 unsigned int step = 1U << vm_area_page_order(v);
4826 if (!counters)
4827 return;
4829 if (v->flags & VM_UNINITIALIZED)
4830 return;
4831 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4832 smp_rmb();
4834 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4836 for (nr = 0; nr < v->nr_pages; nr += step)
4837 counters[page_to_nid(v->pages[nr])] += step;
4838 for_each_node_state(nr, N_HIGH_MEMORY)
4839 if (counters[nr])
4840 seq_printf(m, " N%u=%u", nr, counters[nr]);
4844 static void show_purge_info(struct seq_file *m)
4845 {
4846 struct vmap_node *vn;
4847 struct vmap_area *va;
4848 int i;
4850 for (i = 0; i < nr_vmap_nodes; i++) {
4851 vn = &vmap_nodes[i];
4853 spin_lock(&vn->lazy.lock);
4854 list_for_each_entry(va, &vn->lazy.head, list) {
4855 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4856 (void *)va->va_start, (void *)va->va_end,
4857 va->va_end - va->va_start);
4858 }
4859 spin_unlock(&vn->lazy.lock);
4860 }
4861 }
4863 static int vmalloc_info_show(struct seq_file *m, void *p)
4864 {
4865 struct vmap_node *vn;
4866 struct vmap_area *va;
4867 struct vm_struct *v;
4868 int i;
4870 for (i = 0; i < nr_vmap_nodes; i++) {
4871 vn = &vmap_nodes[i];
4873 spin_lock(&vn->busy.lock);
4874 list_for_each_entry(va, &vn->busy.head, list) {
4875 if (!va->vm) {
4876 if (va->flags & VMAP_RAM)
4877 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4878 (void *)va->va_start, (void *)va->va_end,
4879 va->va_end - va->va_start);
4881 continue;
4882 }
4884 v = va->vm;
4886 seq_printf(m, "0x%pK-0x%pK %7ld",
4887 v->addr, v->addr + v->size, v->size);
4889 if (v->caller)
4890 seq_printf(m, " %pS", v->caller);
4893 seq_printf(m, " pages=%d", v->nr_pages);
4896 seq_printf(m, " phys=%pa", &v->phys_addr);
4898 if (v->flags & VM_IOREMAP)
4899 seq_puts(m, " ioremap");
4901 if (v->flags & VM_SPARSE)
4902 seq_puts(m, " sparse");
4904 if (v->flags & VM_ALLOC)
4905 seq_puts(m, " vmalloc");
4907 if (v->flags & VM_MAP)
4908 seq_puts(m, " vmap");
4910 if (v->flags & VM_USERMAP)
4911 seq_puts(m, " user");
4913 if (v->flags & VM_DMA_COHERENT)
4914 seq_puts(m, " dma-coherent");
4916 if (is_vmalloc_addr(v->pages))
4917 seq_puts(m, " vpages");
4919 show_numa_info(m, v);
4920 seq_putc(m, '\n');
4921 }
4922 spin_unlock(&vn->busy.lock);
4923 }
4925 /*
4926 * As a final step, dump "unpurged" areas.
4927 */
4928 show_purge_info(m);
4929 return 0;
4930 }
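/*
 * Editor's note: with the format strings above, /proc/vmallocinfo entries
 * produced by this function look roughly like the hypothetical lines
 * below. Addresses, sizes and callers are made up, and %pK output depends
 * on the kptr_restrict setting (it may be hashed, real or zeroed).
 *
 *   0xffffc90000035000-0xffffc9000003a000   20480 load_module+0x.../0x... pages=4 vmalloc N0=4
 *   0xffffc90000040000-0xffffc90000042000    8192 unpurged vm_area
 */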
4932 static int __init proc_vmalloc_init(void)
4933 {
4934 void *priv_data = NULL;
4936 if (IS_ENABLED(CONFIG_NUMA))
4937 priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
4939 proc_create_single_data("vmallocinfo",
4940 0400, NULL, vmalloc_info_show, priv_data);
4942 return 0;
4943 }
4944 module_init(proc_vmalloc_init);
4945 #endif
4948 static void __init vmap_init_free_space(void)
4949 {
4950 unsigned long vmap_start = 1;
4951 const unsigned long vmap_end = ULONG_MAX;
4952 struct vmap_area *free;
4953 struct vm_struct *busy;
4955 /*
4956 *     B     F     B     B     B     F
4957 * -|-----|.....|-----|-----|-----|.....|-
4958 *  |           The KVA space           |
4959 *  |<--------------------------------->|
4960 */
4961 for (busy = vmlist; busy; busy = busy->next) {
4962 if ((unsigned long) busy->addr - vmap_start > 0) {
4963 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4964 if (!WARN_ON_ONCE(!free)) {
4965 free->va_start = vmap_start;
4966 free->va_end = (unsigned long) busy->addr;
4968 insert_vmap_area_augment(free, NULL,
4969 &free_vmap_area_root,
4970 &free_vmap_area_list);
4974 vmap_start = (unsigned long) busy->addr + busy->size;
4977 if (vmap_end - vmap_start > 0) {
4978 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4979 if (!WARN_ON_ONCE(!free)) {
4980 free->va_start = vmap_start;
4981 free->va_end = vmap_end;
4983 insert_vmap_area_augment(free, NULL,
4984 &free_vmap_area_root,
4985 &free_vmap_area_list);
4990 static void vmap_init_nodes(void)
4991 {
4992 struct vmap_node *vn;
4993 int i, n;
4995 #if BITS_PER_LONG == 64
4996 /*
4997 * The maximum number of nodes is fixed and bound to 128, thus
4998 * the scale factor is 1 for systems where the number of cores
4999 * is less than or equal to that threshold.
5001 * A note on NUMA awareness: for bigger systems, for example
5002 * multi-socket NUMA machines that can end up with thousands of
5003 * cores in total, a "sub-numa-clustering" layer should be added.
5005 * In that case a NUMA domain is treated as a single entity with
5006 * dedicated sub-nodes in it, each describing one group or set
5007 * of cores. Per-domain purging and per-domain balancing would
5008 * then have to be added as well.
5009 */
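/*
 * Editor's illustration of the clamp below (hypothetical CPU counts):
 * a machine with 8 possible CPUs gets 8 vmap nodes, one with 96 CPUs
 * gets 96, and anything with 128 or more CPUs is capped at 128. A
 * uniprocessor system keeps the single default node, since the n > 1
 * branch is never taken.
 */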
5010 n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
5012 if (n > 1) {
5013 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
5014 if (vn) {
5015 /* Node partition is 16 pages. */
5016 vmap_zone_size = (1 << 4) * PAGE_SIZE;
5017 nr_vmap_nodes = n;
5018 vmap_nodes = vn;
5019 } else {
5020 pr_err("Failed to allocate an array. Disable a node layer\n");
5025 for (n = 0; n < nr_vmap_nodes; n++) {
5026 vn = &vmap_nodes[n];
5027 vn->busy.root = RB_ROOT;
5028 INIT_LIST_HEAD(&vn->busy.head);
5029 spin_lock_init(&vn->busy.lock);
5031 vn->lazy.root = RB_ROOT;
5032 INIT_LIST_HEAD(&vn->lazy.head);
5033 spin_lock_init(&vn->lazy.lock);
5035 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5036 INIT_LIST_HEAD(&vn->pool[i].head);
5037 WRITE_ONCE(vn->pool[i].len, 0);
5038 }
5040 spin_lock_init(&vn->pool_lock);
5041 }
5042 }
5044 static unsigned long
5045 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5046 {
5047 unsigned long count;
5048 struct vmap_node *vn;
5049 int i, j;
5051 for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
5052 vn = &vmap_nodes[i];
5054 for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
5055 count += READ_ONCE(vn->pool[j].len);
5056 }
5058 return count ? count : SHRINK_EMPTY;
5059 }
5061 static unsigned long
5062 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5063 {
5064 int i;
5066 for (i = 0; i < nr_vmap_nodes; i++)
5067 decay_va_pool_node(&vmap_nodes[i], true);
5069 return SHRINK_STOP;
5070 }
5072 void __init vmalloc_init(void)
5073 {
5074 struct shrinker *vmap_node_shrinker;
5075 struct vmap_area *va;
5076 struct vmap_node *vn;
5077 struct vm_struct *tmp;
5078 int i;
5080 /*
5081 * Create the cache for vmap_area objects.
5082 */
5083 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5085 for_each_possible_cpu(i) {
5086 struct vmap_block_queue *vbq;
5087 struct vfree_deferred *p;
5089 vbq = &per_cpu(vmap_block_queue, i);
5090 spin_lock_init(&vbq->lock);
5091 INIT_LIST_HEAD(&vbq->free);
5092 p = &per_cpu(vfree_deferred, i);
5093 init_llist_head(&p->list);
5094 INIT_WORK(&p->wq, delayed_vfree_work);
5095 xa_init(&vbq->vmap_blocks);
5096 }
5098 /*
5099 * Setup nodes before importing vmlist.
5100 */
5101 vmap_init_nodes();
5103 /* Import existing vmlist entries. */
5104 for (tmp = vmlist; tmp; tmp = tmp->next) {
5105 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5106 if (WARN_ON_ONCE(!va))
5107 continue;
5109 va->va_start = (unsigned long)tmp->addr;
5110 va->va_end = va->va_start + tmp->size;
5111 va->vm = tmp;
5113 vn = addr_to_node(va->va_start);
5114 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5115 }
5117 /*
5118 * Now we can initialize a free vmap space.
5119 */
5120 vmap_init_free_space();
5121 vmap_initialized = true;
5123 vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5124 if (!vmap_node_shrinker) {
5125 pr_err("Failed to allocate vmap-node shrinker!\n");
5129 vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5130 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5131 shrinker_register(vmap_node_shrinker);
5132 }