/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>
/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
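
/*
 * Illustrative example (not part of the original header): an allocation
 * path might take placement hints from a mapping while keeping only the
 * caller's reclaim-related constraints, roughly along the lines of
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | (gfp_mask & GFP_RECLAIM_MASK);
 *
 * so that hints such as __GFP_HIGHMEM come from the mapping, while bits
 * like __GFP_NOFS or __GFP_NORETRY supplied by the caller are preserved.
 */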
/*
 * Unlike WARN_ON_ONCE(), no warning is issued when __GFP_NOWARN is
 * specified in the gfp flags.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
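
/*
 * Illustrative use (a sketch mirroring how allocation paths typically use
 * this macro): warn at most once about an unexpected condition, but stay
 * silent when the caller asked for __GFP_NOWARN:
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 */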
void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}
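
/*
 * Illustrative debugging sketch (example only, not taken from this header):
 * a sanity check on the per-page mapcounts might look like
 *
 *	VM_WARN_ON_FOLIO(folio_nr_pages_mapped(folio) > folio_nr_pages(folio),
 *			 folio);
 */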
/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore().
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
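
/*
 * Illustrative sketch of the usage mentioned above (swap-in style; example
 * only), where @entry may point at any page of the large folio:
 *
 *	arch_swap_restore(folio_swap(entry, folio), folio);
 */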
static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}
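
/*
 * Illustrative sketch (example only): with the ignored bits masked off,
 * two PTEs can be treated as equal for batching purposes:
 *
 *	fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	bool batchable = pte_same(__pte_batch_clear_ignored(a, flags),
 *				  __pte_batch_clear_ignored(b, flags));
 */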
/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
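
/*
 * Illustrative sketch of a caller (example only, simplified): with the page
 * table lock held and @max_nr clamped so the scan stays within the current
 * page table, a whole batch of a large folio can be handled at once:
 *
 *	nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *			     FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY,
 *			     NULL, NULL, NULL);
 *	addr += nr * PAGE_SIZE;
 *	pte += nr;
 */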
/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + 1)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
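
/*
 * Illustrative sketch of a caller (example only): when tearing down or
 * madvising a range whose PTEs hold ordinary swap entries, consecutive
 * entries can be processed as one batch:
 *
 *	entry = pte_to_swp_entry(ptent);
 *	if (!non_swap_entry(entry))
 *		nr = swap_pte_batch(pte, max_nr, ptent);
 */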
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		pgoff_t index);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);
/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};
/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if:
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
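
/*
 * Illustrative sketch (simplified from how the buddy merge path uses the
 * relation above): starting at @pfn of a freed order-@order page,
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	combined_pfn = buddy_pfn & pfn;
 *
 * gives the pfn of the order+1 parent that the pair would merge into.
 */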
/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn() in __free_one_page().
 *
 * The found buddy can be a non-PageBuddy page, outside @page's zone, or of a
 * different order than @page. Validation is therefore necessary before using
 * it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void folio_undo_large_rmappable(struct folio *folio);

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);
#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end,
					int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
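
/*
 * Illustrative sketch of how these classifiers back the per-mm counters
 * (roughly what vm_stat_account() does; simplified example only):
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */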
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function just checks whether the folio
 * range is within the range [start, end). The caller needs to do a page
 * table check if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the
 * page table of the VMA and the range [start, end) intersects with the VMA
 * range. The caller wants to know whether the folio is fully associated with
 * the range. It calls this function to check whether the folio is in the
 * range first. Then it checks the page table to know whether the folio is
 * fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}
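
/*
 * Illustrative sketch of the typical usage described above (mlock-style;
 * example only): skip whole-folio handling when the folio straddles the VMA
 * and fall back to per-page processing:
 *
 *	if (!folio_within_vma(folio, vma))
 *		goto handle_per_page;
 */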
/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * Always munlock the folio when this function is called. Ideally we
	 * would only munlock if some page of the folio is being unmapped from
	 * the VMA, leaving the folio no longer fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation, so we always
	 * munlock the folio and let page reclaim correct it if that was wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
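
/*
 * Illustrative sketch (example only): rmap walkers typically compute the
 * address at which a folio is mapped in a given VMA as
 *
 *	unsigned long addr = vma_address(vma, folio_pgoff(folio),
 *					 folio_nr_pages(folio));
 *
 * and treat -EFAULT as "not mapped in this VMA".
 */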
/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */
/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else
static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif
/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * oom victims
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
					 * to 25% of the min watermark or
					 * 62.5% if __GFP_HIGH is set.
					 */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
					 * of the min watermark.
					 */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)
/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *    folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * folio.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}
extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when without soft-dirty being compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
	 * will be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is not set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}
static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
1417 * VMA lock generalization
1419 struct vma_prepare {
1420 struct vm_area_struct *vma;
1421 struct vm_area_struct *adj_next;
1423 struct address_space *mapping;
1424 struct anon_vma *anon_vma;
1425 struct vm_area_struct *insert;
1426 struct vm_area_struct *remove;
1427 struct vm_area_struct *remove2;
1430 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1431 unsigned long zone, int nid);
1433 /* shrinker related functions */
1434 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
#ifdef CONFIG_64BIT
static inline int can_do_mseal(unsigned long flags)
{
	if (flags)
		return -EINVAL;

	return 0;
}

bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end);
bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior);
#else
static inline int can_do_mseal(unsigned long flags)
{
	return -EPERM;
}

static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end)
{
	return true;
}

static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
		unsigned long end, int behavior)
{
	return true;
}
#endif
#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;

#endif	/* __MM_INTERNAL_H */