1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /* internal.h: mm/ internal definitions
4 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
7 #ifndef __MM_INTERNAL_H
8 #define __MM_INTERNAL_H
12 #include <linux/pagemap.h>
13 #include <linux/rmap.h>
14 #include <linux/swap.h>
15 #include <linux/swapops.h>
16 #include <linux/tracepoint-defs.h>
21 * The set of flags that only affect watermark checking and reclaim
22 * behaviour. This is used by the MM to obey the caller constraints
23 * about IO, FS and watermark checking while ignoring placement
24 * hints such as HIGHMEM usage.
26 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
27 __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
28 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
31 /* The GFP flags allowed during early boot */
32 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
34 /* Control allocation cpuset and node placement constraints */
35 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
37 /* Do not use these with a slab allocator */
38 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
41 * Different from WARN_ON_ONCE(), no warning will be issued
42 * when we specify __GFP_NOWARN.
44 #define WARN_ON_ONCE_GFP(cond, gfp) ({ \
45 static bool __section(".data.once") __warned; \
46 int __ret_warn_once = !!(cond); \
48 if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
52 unlikely(__ret_warn_once); \
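/*
 * A minimal userspace sketch of the once-only, __GFP_NOWARN-suppressible
 * warning pattern above.  The .data.once section and the WARN machinery are
 * replaced with a plain static flag and fprintf(); GFP_NOWARN_DEMO is an
 * invented stand-in bit, not the real __GFP_NOWARN.  Kept under #if 0 since
 * it is purely illustrative.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define GFP_NOWARN_DEMO (1u << 0)	/* hypothetical "do not warn" bit */

#define WARN_ON_ONCE_GFP_DEMO(cond, gfp) ({			\
	static bool __warned;					\
	bool __ret = !!(cond);					\
								\
	if (!((gfp) & GFP_NOWARN_DEMO) && __ret && !__warned) {\
		__warned = true;				\
		fprintf(stderr, "warning: %s\n", #cond);	\
	}							\
	__ret;							\
})

static void demo_alloc_failure(unsigned int gfp)
{
	/* One call site: its static __warned makes it fire at most once. */
	WARN_ON_ONCE_GFP_DEMO(gfp != 0, gfp);
}

int main(void)
{
	demo_alloc_failure(1u << 5);		/* warns */
	demo_alloc_failure(1u << 5);		/* silent: already warned here */
	demo_alloc_failure(GFP_NOWARN_DEMO);	/* silent: suppressed by NOWARN */
	return 0;
}
#endif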
55 void page_writeback_init(void);
58 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
59 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
60 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
61 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
63 #define ENTIRELY_MAPPED 0x800000
64 #define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1)
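/*
 * A standalone check of the arithmetic behind the ENTIRELY_MAPPED choice,
 * using the 16GB/4kB example from the comment above.  16G / 4k = 0x400000
 * pages, so bit 0x800000 can never collide with a per-page mapped count, and
 * (ENTIRELY_MAPPED - 1) masks that count back out.  DEMO_* names are invented.
 */
#if 0
#include <assert.h>
#include <stdio.h>

#define DEMO_ENTIRELY_MAPPED	0x800000UL
#define DEMO_PAGES_MAPPED_MASK	(DEMO_ENTIRELY_MAPPED - 1)

int main(void)
{
	unsigned long long max_pages = (16ULL << 30) / (4ULL << 10);
	unsigned long raw = DEMO_ENTIRELY_MAPPED | 0x1234;	/* flag + count */

	assert(max_pages == 0x400000);			/* 16GB of 4kB pages */
	assert(max_pages < DEMO_ENTIRELY_MAPPED);	/* flag bit sits above */
	printf("pages mapped = %#lx\n", raw & DEMO_PAGES_MAPPED_MASK);	/* 0x1234 */
	return 0;
}
#endif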
67 * Flags passed to __show_mem() and show_free_areas() to suppress output in
68 * various contexts.
70 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
73 * How many individual pages have an elevated _mapcount. Excludes
74 * the folio's entire_mapcount.
76 * Don't use this function outside of debugging code.
78 static inline int folio_nr_pages_mapped(const struct folio *folio)
80 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
84 * Retrieve the first entry of a folio based on a provided entry within the
85 * folio. We cannot rely on folio->swap as there is no guarantee that it has
86 * been initialized. Used for calling arch_swap_restore()
88 static inline swp_entry_t folio_swap(swp_entry_t entry,
89 const struct folio *folio)
92 .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
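/*
 * A standalone sketch of the ALIGN_DOWN trick used by folio_swap() above,
 * assuming the folio's swap entries are consecutive and its page count is a
 * power of two.  ALIGN_DOWN_DEMO mirrors the kernel macro for that
 * power-of-two case only.
 */
#if 0
#include <assert.h>

#define ALIGN_DOWN_DEMO(x, a)	((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long nr_pages = 16;		/* order-4 folio */
	unsigned long entry_val = 0x12345;	/* entry of one of its pages */

	/* Rounding down to a multiple of 16 recovers the folio's first entry. */
	assert(ALIGN_DOWN_DEMO(entry_val, nr_pages) == 0x12340);
	return 0;
}
#endif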
98 static inline void *folio_raw_mapping(const struct folio *folio)
100 unsigned long mapping = (unsigned long)folio->mapping;
102 return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
107 /* Flags for folio_pte_batch(). */
108 typedef int __bitwise fpb_t;
110 /* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
111 #define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0))
113 /* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
114 #define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1))
116 static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
118 if (flags & FPB_IGNORE_DIRTY)
119 pte = pte_mkclean(pte);
120 if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
121 pte = pte_clear_soft_dirty(pte);
122 return pte_wrprotect(pte_mkold(pte));
126 * folio_pte_batch - detect a PTE batch for a large folio
127 * @folio: The large folio to detect a PTE batch for.
128 * @addr: The user virtual address the first page is mapped at.
129 * @start_ptep: Page table pointer for the first entry.
130 * @pte: Page table entry for the first page.
131 * @max_nr: The maximum number of table entries to consider.
132 * @flags: Flags to modify the PTE batch semantics.
133 * @any_writable: Optional pointer to indicate whether any entry except the
134 * first one is writable.
135 * @any_young: Optional pointer to indicate whether any entry except the
136 * first one is young.
137 * @any_dirty: Optional pointer to indicate whether any entry except the
138 * first one is dirty.
140 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
141 * pages of the same large folio.
143 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
144 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
145 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
147 * start_ptep must map any page of the folio. max_nr must be at least one and
148 * must be limited by the caller so scanning cannot exceed a single page table.
150 * Return: the number of table entries in the batch.
152 static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
153 pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
154 bool *any_writable, bool *any_young, bool *any_dirty)
156 unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
157 const pte_t *end_ptep = start_ptep + max_nr;
158 pte_t expected_pte, *ptep;
159 bool writable, young, dirty;
163 *any_writable = false;
169 VM_WARN_ON_FOLIO(!pte_present(pte), folio);
170 VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
171 VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
173 nr = pte_batch_hint(start_ptep, pte);
174 expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
175 ptep = start_ptep + nr;
177 while (ptep < end_ptep) {
178 pte = ptep_get(ptep);
180 writable = !!pte_write(pte);
182 young = !!pte_young(pte);
184 dirty = !!pte_dirty(pte);
185 pte = __pte_batch_clear_ignored(pte, flags);
187 if (!pte_same(pte, expected_pte))
191 * Stop immediately once we reached the end of the folio. In
192 * corner cases the next PFN might fall into a different
193 * folio.
195 if (pte_pfn(pte) >= folio_end_pfn)
199 *any_writable |= writable;
205 nr = pte_batch_hint(ptep, pte);
206 expected_pte = pte_advance_pfn(expected_pte, nr);
210 return min(ptep - start_ptep, max_nr);
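/*
 * A toy userspace model of the batch detection implemented above, assuming an
 * invented PTE encoding (pfn << 8 plus a few software bits).  The DIRTY/YOUNG
 * bits play the role of the bits the real code normalises away, and the scan
 * stops at the first entry that is not "previous pfn + 1 with the same
 * remaining bits".  Names and layout are made up for the demo.
 */
#if 0
#include <assert.h>

#define DEMO_PFN_SHIFT	8
#define DEMO_DIRTY	0x1UL
#define DEMO_YOUNG	0x2UL
#define DEMO_PROT	0x4UL	/* stands in for the bits that must match */

static unsigned long demo_clear_ignored(unsigned long pte)
{
	return pte & ~(DEMO_DIRTY | DEMO_YOUNG);	/* like mkclean + mkold */
}

static int demo_pte_batch(const unsigned long *ptep, int max_nr)
{
	unsigned long expected = demo_clear_ignored(ptep[0]) + (1UL << DEMO_PFN_SHIFT);
	int nr = 1;

	while (nr < max_nr && demo_clear_ignored(ptep[nr]) == expected) {
		expected += 1UL << DEMO_PFN_SHIFT;
		nr++;
	}
	return nr;
}

int main(void)
{
	unsigned long ptes[] = {
		(100UL << DEMO_PFN_SHIFT) | DEMO_PROT,
		(101UL << DEMO_PFN_SHIFT) | DEMO_PROT | DEMO_DIRTY,	/* dirty ignored */
		(102UL << DEMO_PFN_SHIFT) | DEMO_PROT | DEMO_YOUNG,	/* young ignored */
		(200UL << DEMO_PFN_SHIFT) | DEMO_PROT,			/* breaks the run */
	};

	assert(demo_pte_batch(ptes, 4) == 3);
	return 0;
}
#endif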
214 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
215 * forward or backward by delta
216 * @pte: The initial pte state; is_swap_pte(pte) must be true and
217 * non_swap_entry() must be false.
218 * @delta: The direction and the offset we are moving; forward if delta
219 * is positive; backward if delta is negative
221 * Moves the swap offset, while maintaining all other fields, including
222 * swap type, and any swp pte bits. The resulting pte is returned.
224 static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
226 swp_entry_t entry = pte_to_swp_entry(pte);
227 pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
228 (swp_offset(entry) + delta)));
230 if (pte_swp_soft_dirty(pte))
231 new = pte_swp_mksoft_dirty(new);
232 if (pte_swp_exclusive(pte))
233 new = pte_swp_mkexclusive(new);
234 if (pte_swp_uffd_wp(pte))
235 new = pte_swp_mkuffd_wp(new);
242 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
243 * @pte: The initial pte state; is_swap_pte(pte) must be true and
244 * non_swap_entry() must be false.
246 * Increments the swap offset, while maintaining all other fields, including
247 * swap type, and any swp pte bits. The resulting pte is returned.
249 static inline pte_t pte_next_swp_offset(pte_t pte)
251 return pte_move_swp_offset(pte, 1);
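/*
 * A toy model of the swap-pte offset arithmetic above, assuming an invented
 * layout (type in bits 63..56, offset in bits 55..8, software bits in the low
 * byte) that does not match any real architecture.  Moving the offset leaves
 * the type and the software bits untouched, as pte_move_swp_offset() does.
 */
#if 0
#include <assert.h>

static unsigned long long demo_swp(unsigned long long type,
				   unsigned long long off,
				   unsigned long long soft)
{
	return (type << 56) | (off << 8) | soft;
}

static unsigned long long demo_move_offset(unsigned long long swp, long delta)
{
	unsigned long long type = swp >> 56;
	unsigned long long off  = (swp >> 8) & ((1ULL << 48) - 1);
	unsigned long long soft = swp & 0xff;

	return demo_swp(type, off + delta, soft);
}

int main(void)
{
	unsigned long long swp = demo_swp(3, 1000, 0x5);

	assert(demo_move_offset(swp, 1)  == demo_swp(3, 1001, 0x5));
	assert(demo_move_offset(swp, -4) == demo_swp(3, 996, 0x5));
	return 0;
}
#endif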
255 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
256 * @start_ptep: Page table pointer for the first entry.
257 * @max_nr: The maximum number of table entries to consider.
258 * @pte: Page table entry for the first entry.
260 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
261 * containing swap entries all with consecutive offsets and targeting the same
262 * swap type, all with matching swp pte bits.
264 * max_nr must be at least one and must be limited by the caller so scanning
265 * cannot exceed a single page table.
267 * Return: the number of table entries in the batch.
269 static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
271 pte_t expected_pte = pte_next_swp_offset(pte);
272 const pte_t *end_ptep = start_ptep + max_nr;
273 pte_t *ptep = start_ptep + 1;
275 VM_WARN_ON(max_nr < 1);
276 VM_WARN_ON(!is_swap_pte(pte));
277 VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));
279 while (ptep < end_ptep) {
280 pte = ptep_get(ptep);
282 if (!pte_same(pte, expected_pte))
285 expected_pte = pte_next_swp_offset(expected_pte);
289 return ptep - start_ptep;
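/*
 * swap_pte_batch() in miniature: a standalone sketch that reuses the same
 * invented swap-pte layout as the previous sketch (type << 56 | offset << 8 |
 * software bits in the low byte).  The run ends at the first entry that is
 * not "previous entry with the offset incremented by one".
 */
#if 0
#include <assert.h>

static unsigned long long demo_next(unsigned long long swp)
{
	return swp + (1ULL << 8);	/* the offset field starts at bit 8 */
}

static int demo_swap_batch(const unsigned long long *ptep, int max_nr)
{
	unsigned long long expected = demo_next(ptep[0]);
	int nr = 1;

	while (nr < max_nr && ptep[nr] == expected) {
		expected = demo_next(expected);
		nr++;
	}
	return nr;
}

int main(void)
{
	unsigned long long swp[] = {
		(2ULL << 56) | (100ULL << 8) | 0x1,
		(2ULL << 56) | (101ULL << 8) | 0x1,
		(2ULL << 56) | (102ULL << 8) | 0x1,
		(2ULL << 56) | (103ULL << 8) | 0x3,	/* soft bit differs: stop */
	};

	assert(demo_swap_batch(swp, 4) == 3);
	return 0;
}
#endif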
291 #endif /* CONFIG_MMU */
293 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
295 static inline void acct_reclaim_writeback(struct folio *folio)
297 pg_data_t *pgdat = folio_pgdat(folio);
298 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
301 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
304 static inline void wake_throttle_isolated(pg_data_t *pgdat)
306 wait_queue_head_t *wqh;
308 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
309 if (waitqueue_active(wqh))
313 vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
314 vm_fault_t do_swap_page(struct vm_fault *vmf);
315 void folio_rotate_reclaimable(struct folio *folio);
316 bool __folio_end_writeback(struct folio *folio);
317 void deactivate_file_folio(struct folio *folio);
318 void folio_activate(struct folio *folio);
320 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
321 struct vm_area_struct *start_vma, unsigned long floor,
322 unsigned long ceiling, bool mm_wr_locked);
323 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
326 void unmap_page_range(struct mmu_gather *tlb,
327 struct vm_area_struct *vma,
328 unsigned long addr, unsigned long end,
329 struct zap_details *details);
331 void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
333 void force_page_cache_ra(struct readahead_control *, unsigned long nr);
334 static inline void force_page_cache_readahead(struct address_space *mapping,
335 struct file *file, pgoff_t index, unsigned long nr_to_read)
337 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
338 force_page_cache_ra(&ractl, nr_to_read);
341 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
342 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
343 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
344 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
345 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
346 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
347 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
349 long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
350 unsigned long mapping_try_invalidate(struct address_space *mapping,
351 pgoff_t start, pgoff_t end, unsigned long *nr_failed);
354 * folio_evictable - Test whether a folio is evictable.
355 * @folio: The folio to test.
357 * Test whether @folio is evictable -- i.e., should be placed on
358 * active/inactive lists vs unevictable list.
360 * Reasons folio might not be evictable:
361 * 1. folio's mapping marked unevictable
362 * 2. One of the pages in the folio is part of an mlocked VMA
364 static inline bool folio_evictable(struct folio *folio)
368 /* Prevent address_space of inode and swap cache from being freed */
370 ret = !mapping_unevictable(folio_mapping(folio)) &&
371 !folio_test_mlocked(folio);
377 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
378 * a count of one.
380 static inline void set_page_refcounted(struct page *page)
382 VM_BUG_ON_PAGE(PageTail(page), page);
383 VM_BUG_ON_PAGE(page_ref_count(page), page);
384 set_page_count(page, 1);
388 * Return true if a folio needs ->release_folio() calling upon it.
390 static inline bool folio_needs_release(struct folio *folio)
392 struct address_space *mapping = folio_mapping(folio);
394 return folio_has_private(folio) ||
395 (mapping && mapping_release_always(mapping));
398 extern unsigned long highest_memmap_pfn;
401 * Maximum number of reclaim retries without progress before the OOM
402 * killer is consider the only way forward.
404 #define MAX_RECLAIM_RETRIES 16
409 bool isolate_lru_page(struct page *page);
410 bool folio_isolate_lru(struct folio *folio);
411 void putback_lru_page(struct page *page);
412 void folio_putback_lru(struct folio *folio);
413 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
418 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
423 #define K(x) ((x) << (PAGE_SHIFT-10))
425 extern char * const zone_names[MAX_NR_ZONES];
427 /* perform sanity checks on struct pages being allocated or freed */
428 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
430 extern int min_free_kbytes;
432 void setup_per_zone_wmarks(void);
433 void calculate_min_free_kbytes(void);
434 int __meminit init_per_zone_wmark_min(void);
435 void page_alloc_sysctl_init(void);
438 * Structure for holding the mostly immutable allocation parameters passed
439 * between functions involved in allocations, including the alloc_pages*
440 * family of functions.
442 * nodemask, migratetype and highest_zoneidx are initialized only once in
443 * __alloc_pages() and then never change.
445 * zonelist, preferred_zone and highest_zoneidx are set first in
446 * __alloc_pages() for the fast path, and might be later changed
447 * in __alloc_pages_slowpath(). All other functions pass the whole structure
448 * by a const pointer.
450 struct alloc_context {
451 struct zonelist *zonelist;
452 nodemask_t *nodemask;
453 struct zoneref *preferred_zoneref;
457 * highest_zoneidx represents highest usable zone index of
458 * the allocation request. Due to the nature of the zone,
459 * memory on lower zone than the highest_zoneidx will be
460 * protected by lowmem_reserve[highest_zoneidx].
462 * highest_zoneidx is also used by reclaim/compaction to limit
463 * the target zone since higher zone than this index cannot be
464 * usable for this allocation request.
466 enum zone_type highest_zoneidx;
467 bool spread_dirty_pages;
471 * This function returns the order of a free page in the buddy system. In
472 * general, page_zone(page)->lock must be held by the caller to prevent the
473 * page from being allocated in parallel and returning garbage as the order.
474 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
475 * page cannot be allocated or merged in parallel. Alternatively, it must
476 * handle invalid values gracefully, and use buddy_order_unsafe() below.
478 static inline unsigned int buddy_order(struct page *page)
480 /* PageBuddy() must be checked by the caller */
481 return page_private(page);
485 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
486 * PageBuddy() should be checked first by the caller to minimize race window,
487 * and invalid values must be handled gracefully.
489 * READ_ONCE is used so that if the caller assigns the result into a local
490 * variable and e.g. tests it for valid range before using, the compiler cannot
491 * decide to remove the variable and inline the page_private(page) multiple
492 * times, potentially observing different values in the tests and the actual
493 * use of the result.
495 #define buddy_order_unsafe(page) READ_ONCE(page_private(page))
498 * This function checks whether @buddy is free && is the buddy of @page.
499 * We can coalesce a page and its buddy if
500 * (a) the buddy is not in a hole (check before calling!) &&
501 * (b) the buddy is in the buddy system &&
502 * (c) a page and its buddy have the same order &&
503 * (d) a page and its buddy are in the same zone.
505 * For recording whether a page is in the buddy system, we set PageBuddy.
506 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
508 * For recording page's order, we use page_private(page).
510 static inline bool page_is_buddy(struct page *page, struct page *buddy,
513 if (!page_is_guard(buddy) && !PageBuddy(buddy))
516 if (buddy_order(buddy) != order)
520 * zone check is done late to avoid uselessly calculating
521 * zone/node ids for pages that could never merge.
523 if (page_zone_id(page) != page_zone_id(buddy))
526 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
532 * Locate the struct page for both the matching buddy in our
533 * pair (buddy1) and the combined O(n+1) page they form (page).
535 * 1) Any buddy B1 will have an order O twin B2 which satisfies
536 * the following equation:
537 *     B2 = B1 ^ (1 << O)
538 * For example, if the starting buddy (buddy2) is #8 its order
539 * 1 buddy is #10:
540 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
542 * 2) Any buddy B will have an order O+1 parent P which
543 * satisfies the following equation:
544 *     P = B & ~(1 << O)
546 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
548 static inline unsigned long
549 __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
551 return page_pfn ^ (1 << order);
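/*
 * A standalone check of the buddy arithmetic described above: the buddy of a
 * pfn at a given order is found by flipping bit "order", and clearing that
 * bit yields the combined order+1 parent.  Uses the #8/#10 example from the
 * comment.
 */
#if 0
#include <assert.h>

static unsigned long demo_find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

static unsigned long demo_parent_pfn(unsigned long pfn, unsigned int order)
{
	return pfn & ~(1UL << order);
}

int main(void)
{
	assert(demo_find_buddy_pfn(8, 1) == 10);	/* pfn 8, order 1 -> 10 */
	assert(demo_find_buddy_pfn(10, 1) == 8);	/* and the reverse */
	assert(demo_parent_pfn(10, 1) == 8);		/* they merge at pfn 8 */
	return 0;
}
#endif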
555 * Find the buddy of @page and validate it.
556 * @page: The input page
557 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
558 * function is used in the performance-critical __free_one_page().
559 * @order: The order of the page
560 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
561 * page_to_pfn().
563 * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
564 * not the same as @page. The validation is necessary before using it.
566 * Return: the found buddy page or NULL if not found.
568 static inline struct page *find_buddy_page_pfn(struct page *page,
569 unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
571 unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
574 buddy = page + (__buddy_pfn - pfn);
576 *buddy_pfn = __buddy_pfn;
578 if (page_is_buddy(page, buddy, order))
583 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
584 unsigned long end_pfn, struct zone *zone);
586 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
587 unsigned long end_pfn, struct zone *zone)
589 if (zone->contiguous)
590 return pfn_to_page(start_pfn);
592 return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
595 void set_zone_contiguous(struct zone *zone);
597 static inline void clear_zone_contiguous(struct zone *zone)
599 zone->contiguous = false;
602 extern int __isolate_free_page(struct page *page, unsigned int order);
603 extern void __putback_isolated_page(struct page *page, unsigned int order,
605 extern void memblock_free_pages(struct page *page, unsigned long pfn,
607 extern void __free_pages_core(struct page *page, unsigned int order,
608 enum meminit_context context);
611 * This will have no effect, other than possibly generating a warning, if the
612 * caller passes in a non-large folio.
614 static inline void folio_set_order(struct folio *folio, unsigned int order)
616 if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
619 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
621 folio->_folio_nr_pages = 1U << order;
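/*
 * A standalone sketch of the order encoding used above: the order lives in
 * the low byte of a flags word, so storing it means masking the old byte out
 * first, and the folio's page count is then simply 1 << order.  DEMO values
 * are arbitrary.
 */
#if 0
#include <assert.h>

static unsigned long demo_set_order(unsigned long flags, unsigned int order)
{
	return (flags & ~0xffUL) | order;
}

int main(void)
{
	unsigned long flags = 0xabcd00UL | 4;	/* some flag bits + old order 4 */

	flags = demo_set_order(flags, 9);
	assert((flags & 0xff) == 9);			/* order in the low byte */
	assert((flags & ~0xffUL) == 0xabcd00UL);	/* other bits untouched */
	assert((1UL << (flags & 0xff)) == 512);		/* nr_pages = 1 << order */
	return 0;
}
#endif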
625 void __folio_undo_large_rmappable(struct folio *folio);
626 static inline void folio_undo_large_rmappable(struct folio *folio)
628 if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
632 * At this point, there is no one trying to add the folio to
633 * deferred_list. If folio is not in deferred_list, it's safe
634 * to check without acquiring the split_queue_lock.
636 if (data_race(list_empty(&folio->_deferred_list)))
639 __folio_undo_large_rmappable(folio);
642 static inline struct folio *page_rmappable_folio(struct page *page)
644 struct folio *folio = (struct folio *)page;
646 if (folio && folio_test_large(folio))
647 folio_set_large_rmappable(folio);
651 static inline void prep_compound_head(struct page *page, unsigned int order)
653 struct folio *folio = (struct folio *)page;
655 folio_set_order(folio, order);
656 atomic_set(&folio->_large_mapcount, -1);
657 atomic_set(&folio->_entire_mapcount, -1);
658 atomic_set(&folio->_nr_pages_mapped, 0);
659 atomic_set(&folio->_pincount, 0);
661 INIT_LIST_HEAD(&folio->_deferred_list);
664 static inline void prep_compound_tail(struct page *head, int tail_idx)
666 struct page *p = head + tail_idx;
668 p->mapping = TAIL_MAPPING;
669 set_compound_head(p, head);
670 set_page_private(p, 0);
673 extern void prep_compound_page(struct page *page, unsigned int order);
675 extern void post_alloc_hook(struct page *page, unsigned int order,
677 extern bool free_pages_prepare(struct page *page, unsigned int order);
679 extern int user_min_free_kbytes;
681 void free_unref_page(struct page *page, unsigned int order);
682 void free_unref_folios(struct folio_batch *fbatch);
684 extern void zone_pcp_reset(struct zone *zone);
685 extern void zone_pcp_disable(struct zone *zone);
686 extern void zone_pcp_enable(struct zone *zone);
687 extern void zone_pcp_init(struct zone *zone);
689 extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
690 phys_addr_t min_addr,
691 int nid, bool exact_nid);
693 void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
694 unsigned long, enum meminit_context, struct vmem_altmap *, int);
696 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
702 * compact_control is used to track pages being migrated and the free pages
703 * they are being migrated to during memory compaction. The free_pfn starts
704 * at the end of a zone and migrate_pfn begins at the start. Movable pages
705 * are moved to the end of a zone during a compaction run and the run
706 * completes when free_pfn <= migrate_pfn
708 struct compact_control {
709 struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */
710 struct list_head migratepages; /* List of pages being migrated */
711 unsigned int nr_freepages; /* Number of isolated free pages */
712 unsigned int nr_migratepages; /* Number of pages to migrate */
713 unsigned long free_pfn; /* isolate_freepages search base */
715 * Acts as an in/out parameter to page isolation for migration.
716 * isolate_migratepages uses it as a search base.
717 * isolate_migratepages_block will update the value to the next pfn
718 * after the last isolated one.
720 unsigned long migrate_pfn;
721 unsigned long fast_start_pfn; /* a pfn to start linear scan from */
723 unsigned long total_migrate_scanned;
724 unsigned long total_free_scanned;
725 unsigned short fast_search_fail;/* failures to use free list searches */
726 short search_order; /* order to start a fast search at */
727 const gfp_t gfp_mask; /* gfp mask of a direct compactor */
728 int order; /* order a direct compactor needs */
729 int migratetype; /* migratetype of direct compactor */
730 const unsigned int alloc_flags; /* alloc flags of a direct compactor */
731 const int highest_zoneidx; /* zone index of a direct compactor */
732 enum migrate_mode mode; /* Async or sync migration mode */
733 bool ignore_skip_hint; /* Scan blocks even if marked skip */
734 bool no_set_skip_hint; /* Don't mark blocks for skipping */
735 bool ignore_block_suitable; /* Scan blocks considered unsuitable */
736 bool direct_compaction; /* False from kcompactd or /proc/... */
737 bool proactive_compaction; /* kcompactd proactive compaction */
738 bool whole_zone; /* Whole zone should/has been scanned */
739 bool contended; /* Signal lock contention */
740 bool finish_pageblock; /* Scan the remainder of a pageblock. Used
741 * when there are potentially transient
742 * isolation or migration failures to
743 * ensure forward progress.
745 bool alloc_contig; /* alloc_contig_range allocation */
749 * Used in direct compaction when a page should be taken from the freelists
750 * immediately when one is created during the free path.
752 struct capture_control {
753 struct compact_control *cc;
758 isolate_freepages_range(struct compact_control *cc,
759 unsigned long start_pfn, unsigned long end_pfn);
761 isolate_migratepages_range(struct compact_control *cc,
762 unsigned long low_pfn, unsigned long end_pfn);
764 int __alloc_contig_migrate_range(struct compact_control *cc,
765 unsigned long start, unsigned long end,
768 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
769 void init_cma_reserved_pageblock(struct page *page);
771 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
773 int find_suitable_fallback(struct free_area *area, unsigned int order,
774 int migratetype, bool only_stealable, bool *can_steal);
776 static inline bool free_area_empty(struct free_area *area, int migratetype)
778 return list_empty(&area->free_list[migratetype]);
782 * These three helpers classify VMAs for virtual memory accounting.
786 * Executable code area - executable, not writable, not stack
788 static inline bool is_exec_mapping(vm_flags_t flags)
790 return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
794 * Stack area (including shadow stacks)
796 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
797 * do_mmap() forbids all other combinations.
799 static inline bool is_stack_mapping(vm_flags_t flags)
801 return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
805 * Data area - private, writable, not stack
807 static inline bool is_data_mapping(vm_flags_t flags)
809 return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
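/*
 * A standalone sketch of the three accounting classifiers above, with
 * invented DEMO_VM_* flag values (the real VM_* values differ).  It shows how
 * the bit tests carve mappings into executable text, stack and private
 * writable data.
 */
#if 0
#include <assert.h>

#define DEMO_VM_WRITE	0x1UL
#define DEMO_VM_SHARED	0x2UL
#define DEMO_VM_EXEC	0x4UL
#define DEMO_VM_STACK	0x8UL

static int demo_is_exec(unsigned long f)
{
	return (f & (DEMO_VM_EXEC | DEMO_VM_WRITE | DEMO_VM_STACK)) == DEMO_VM_EXEC;
}

static int demo_is_data(unsigned long f)
{
	return (f & (DEMO_VM_WRITE | DEMO_VM_SHARED | DEMO_VM_STACK)) == DEMO_VM_WRITE;
}

int main(void)
{
	assert(demo_is_exec(DEMO_VM_EXEC));			/* r-x text */
	assert(!demo_is_exec(DEMO_VM_EXEC | DEMO_VM_WRITE));	/* writable: not text */
	assert(demo_is_data(DEMO_VM_WRITE));			/* private rw data */
	assert(!demo_is_data(DEMO_VM_WRITE | DEMO_VM_SHARED));	/* shared: not data */
	return 0;
}
#endif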
813 struct anon_vma *folio_anon_vma(struct folio *folio);
816 void unmap_mapping_folio(struct folio *folio);
817 extern long populate_vma_page_range(struct vm_area_struct *vma,
818 unsigned long start, unsigned long end, int *locked);
819 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
820 unsigned long end, bool write, int *locked);
821 extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
822 unsigned long bytes);
825 * NOTE: This function can't tell whether the folio is "fully mapped" in the
826 * range.
827 * "fully mapped" means all the pages of the folio are associated with the page
828 * table of the range, while this function just checks whether the folio range is
829 * within the range [start, end). The function caller needs to do a page table
830 * check if it cares about the page table association.
832 * Typical usage (like mlock or madvise) is:
833 * Caller knows at least 1 page of folio is associated with page table of VMA
834 * and the range [start, end) intersects with the VMA range. Caller wants
835 * to know whether the folio is fully associated with the range. It calls
836 * this function to check whether the folio is in the range first. Then checks
837 * the page table to know whether the folio is fully mapped to the range.
840 folio_within_range(struct folio *folio, struct vm_area_struct *vma,
841 unsigned long start, unsigned long end)
844 unsigned long vma_pglen = vma_pages(vma);
846 VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
850 if (start < vma->vm_start)
851 start = vma->vm_start;
853 if (end > vma->vm_end)
856 pgoff = folio_pgoff(folio);
858 /* if folio start address is not in vma range */
859 if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
862 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
864 return !(addr < start || end - addr < folio_size(folio));
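/*
 * A standalone sketch of the final containment test above: a folio mapped at
 * addr with a given size lies within [start, end) iff addr >= start and
 * end - addr >= size.  Addresses and sizes are invented.
 */
#if 0
#include <assert.h>
#include <stdbool.h>

static bool demo_within(unsigned long addr, unsigned long size,
			unsigned long start, unsigned long end)
{
	return !(addr < start || end - addr < size);
}

int main(void)
{
	/* 64kB folio mapped at 0x10000, range [0x10000, 0x30000): contained. */
	assert(demo_within(0x10000, 0x10000, 0x10000, 0x30000));
	/* Same folio, but the range ends at 0x18000: only partially covered. */
	assert(!demo_within(0x10000, 0x10000, 0x10000, 0x18000));
	return 0;
}
#endif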
868 folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
870 return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
874 * mlock_vma_folio() and munlock_vma_folio():
875 * should be called with vma's mmap_lock held for read or write,
876 * under page table lock for the pte/pmd being added or removed.
878 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
879 * the end of folio_remove_rmap_*(); but new anon folios are managed by
880 * folio_add_lru_vma() calling mlock_new_folio().
882 void mlock_folio(struct folio *folio);
883 static inline void mlock_vma_folio(struct folio *folio,
884 struct vm_area_struct *vma)
887 * The VM_SPECIAL check here serves two purposes.
888 * 1) VM_IO check prevents migration from double-counting during mlock.
889 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
890 * is never left set on a VM_SPECIAL vma, there is an interval while
891 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
892 * still be set while VM_SPECIAL bits are added: so ignore it then.
894 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
898 void munlock_folio(struct folio *folio);
899 static inline void munlock_vma_folio(struct folio *folio,
900 struct vm_area_struct *vma)
903 * Munlock whenever the function is called. Ideally, we should only
904 * do munlock if any page of the folio is unmapped from the VMA and
905 * that leaves the folio not fully mapped to the VMA.
907 * But it's not easy to confirm that's the situation. So we
908 * always munlock the folio and page reclaim will correct it
909 * if it's wrong.
911 if (unlikely(vma->vm_flags & VM_LOCKED))
912 munlock_folio(folio);
915 void mlock_new_folio(struct folio *folio);
916 bool need_mlock_drain(int cpu);
917 void mlock_drain_local(void);
918 void mlock_drain_remote(int cpu);
920 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
923 * vma_address - Find the virtual address a page range is mapped at
924 * @vma: The vma which maps this object.
925 * @pgoff: The page offset within its object.
926 * @nr_pages: The number of pages to consider.
928 * If any page in this range is mapped by this VMA, return the first address
929 * where any of these pages appear. Otherwise, return -EFAULT.
931 static inline unsigned long vma_address(struct vm_area_struct *vma,
932 pgoff_t pgoff, unsigned long nr_pages)
934 unsigned long address;
936 if (pgoff >= vma->vm_pgoff) {
937 address = vma->vm_start +
938 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
939 /* Check for address beyond vma (or wrapped through 0?) */
940 if (address < vma->vm_start || address >= vma->vm_end)
942 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
943 /* Test above avoids possibility of wrap to 0 on 32-bit */
944 address = vma->vm_start;
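/*
 * A standalone sketch of the pgoff-to-address arithmetic used above, assuming
 * a 4kB page size and invented VMA bounds.
 */
#if 0
#include <assert.h>

#define DEMO_PAGE_SHIFT	12

static unsigned long demo_vma_address(unsigned long pgoff, unsigned long vm_pgoff,
				      unsigned long vm_start)
{
	return vm_start + ((pgoff - vm_pgoff) << DEMO_PAGE_SHIFT);
}

int main(void)
{
	/* A VMA starting at 0x40000000 that maps file pages 16 onwards. */
	unsigned long vm_start = 0x40000000UL, vm_pgoff = 16;

	/* File page 20 is therefore mapped four pages into the VMA. */
	assert(demo_vma_address(20, vm_pgoff, vm_start) == vm_start + 4 * 4096);
	return 0;
}
#endif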
952 * Then at what user virtual address will none of the range be found in vma?
953 * Assumes that vma_address() already returned a good starting address.
955 static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
957 struct vm_area_struct *vma = pvmw->vma;
959 unsigned long address;
961 /* Common case, plus ->pgoff is invalid for KSM */
962 if (pvmw->nr_pages == 1)
963 return pvmw->address + PAGE_SIZE;
965 pgoff = pvmw->pgoff + pvmw->nr_pages;
966 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
967 /* Check for address beyond vma (or wrapped through 0?) */
968 if (address < vma->vm_start || address > vma->vm_end)
969 address = vma->vm_end;
973 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
976 int flags = vmf->flags;
982 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
983 * anything, so we only pin the file and drop the mmap_lock if only
984 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
986 if (fault_flag_allow_retry_first(flags) &&
987 !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
988 fpin = get_file(vmf->vma->vm_file);
989 release_fault_lock(vmf);
993 #else /* !CONFIG_MMU */
994 static inline void unmap_mapping_folio(struct folio *folio) { }
995 static inline void mlock_new_folio(struct folio *folio) { }
996 static inline bool need_mlock_drain(int cpu) { return false; }
997 static inline void mlock_drain_local(void) { }
998 static inline void mlock_drain_remote(int cpu) { }
999 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
1002 #endif /* !CONFIG_MMU */
1004 /* Memory initialisation debug and verification */
1005 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1006 DECLARE_STATIC_KEY_TRUE(deferred_pages);
1008 bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
1009 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1017 #ifdef CONFIG_DEBUG_MEMORY_INIT
1019 extern int mminit_loglevel;
1021 #define mminit_dprintk(level, prefix, fmt, arg...) \
1023 if (level < mminit_loglevel) { \
1024 if (level <= MMINIT_WARNING) \
1025 pr_warn("mminit::" prefix " " fmt, ##arg); \
1027 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
1031 extern void mminit_verify_pageflags_layout(void);
1032 extern void mminit_verify_zonelist(void);
1035 static inline void mminit_dprintk(enum mminit_level level,
1036 const char *prefix, const char *fmt, ...)
1040 static inline void mminit_verify_pageflags_layout(void)
1044 static inline void mminit_verify_zonelist(void)
1047 #endif /* CONFIG_DEBUG_MEMORY_INIT */
1049 #define NODE_RECLAIM_NOSCAN -2
1050 #define NODE_RECLAIM_FULL -1
1051 #define NODE_RECLAIM_SOME 0
1052 #define NODE_RECLAIM_SUCCESS 1
1055 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
1056 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
1058 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
1061 return NODE_RECLAIM_NOSCAN;
1063 static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
1065 return NUMA_NO_NODE;
1070 * mm/memory-failure.c
1072 void shake_folio(struct folio *folio);
1073 extern int hwpoison_filter(struct page *p);
1075 extern u32 hwpoison_filter_dev_major;
1076 extern u32 hwpoison_filter_dev_minor;
1077 extern u64 hwpoison_filter_flags_mask;
1078 extern u64 hwpoison_filter_flags_value;
1079 extern u64 hwpoison_filter_memcg;
1080 extern u32 hwpoison_filter_enable;
1081 #define MAGIC_HWPOISON 0x48575053U /* HWPS */
1082 void SetPageHWPoisonTakenOff(struct page *page);
1083 void ClearPageHWPoisonTakenOff(struct page *page);
1084 bool take_page_off_buddy(struct page *page);
1085 bool put_page_back_buddy(struct page *page);
1086 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
1087 void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
1088 struct vm_area_struct *vma, struct list_head *to_kill,
1089 unsigned long ksm_addr);
1090 unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
1092 extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
1093 unsigned long, unsigned long,
1094 unsigned long, unsigned long);
1096 extern void set_pageblock_order(void);
1097 struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
1098 unsigned long reclaim_pages(struct list_head *folio_list);
1099 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
1100 struct list_head *folio_list);
1101 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
1102 #define ALLOC_WMARK_MIN WMARK_MIN
1103 #define ALLOC_WMARK_LOW WMARK_LOW
1104 #define ALLOC_WMARK_HIGH WMARK_HIGH
1105 #define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
1107 /* Mask to get the watermark bits */
1108 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
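/*
 * A standalone sketch of how the watermark index is carried in the low bits
 * of the alloc flags: with ALLOC_NO_WATERMARKS at 0x04, the mask 0x03 extracts
 * the WMARK_MIN/LOW/HIGH index while ignoring the other ALLOC_* bits.  DEMO_*
 * values are stand-ins.
 */
#if 0
#include <assert.h>

#define DEMO_WMARK_LOW		1	/* stand-in for the WMARK_* enum */
#define DEMO_NO_WATERMARKS	0x04
#define DEMO_WMARK_MASK		(DEMO_NO_WATERMARKS - 1)
#define DEMO_CPUSET		0x40

int main(void)
{
	unsigned int alloc_flags = DEMO_WMARK_LOW | DEMO_CPUSET;

	/* Only the low two bits select which watermark to check against. */
	assert((alloc_flags & DEMO_WMARK_MASK) == DEMO_WMARK_LOW);
	return 0;
}
#endif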
1111 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
1112 * cannot assume a reduced access to memory reserves is sufficient for
1113 * !MMU.
1116 #define ALLOC_OOM 0x08
1118 #define ALLOC_OOM ALLOC_NO_WATERMARKS
1121 #define ALLOC_NON_BLOCK 0x10 /* Caller cannot block. Allow access
1122 * to 25% of the min watermark or
1123 * 62.5% if __GFP_HIGH is set.
1125 #define ALLOC_MIN_RESERVE 0x20 /* __GFP_HIGH set. Allow access to 50%
1126 * of the min watermark.
1128 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */
1129 #define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
1130 #ifdef CONFIG_ZONE_DMA32
1131 #define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */
1133 #define ALLOC_NOFRAGMENT 0x0
1135 #define ALLOC_HIGHATOMIC 0x200 /* Allows access to MIGRATE_HIGHATOMIC */
1136 #define ALLOC_KSWAPD 0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
1138 /* Flags that allow allocations below the min watermark. */
1139 #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
1142 struct tlbflush_unmap_batch;
1146 * only for MM internal work items which do not depend on
1147 * any allocations or locks which might depend on allocations
1149 extern struct workqueue_struct *mm_percpu_wq;
1151 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1152 void try_to_unmap_flush(void);
1153 void try_to_unmap_flush_dirty(void);
1154 void flush_tlb_batched_pending(struct mm_struct *mm);
1156 static inline void try_to_unmap_flush(void)
1159 static inline void try_to_unmap_flush_dirty(void)
1162 static inline void flush_tlb_batched_pending(struct mm_struct *mm)
1165 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
1167 extern const struct trace_print_flags pageflag_names[];
1168 extern const struct trace_print_flags pagetype_names[];
1169 extern const struct trace_print_flags vmaflag_names[];
1170 extern const struct trace_print_flags gfpflag_names[];
1172 static inline bool is_migrate_highatomic(enum migratetype migratetype)
1174 return migratetype == MIGRATE_HIGHATOMIC;
1177 void setup_zone_pageset(struct zone *zone);
1179 struct migration_target_control {
1180 int nid; /* preferred node id */
1183 enum migrate_reason reason;
1189 size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
1190 struct folio *folio, loff_t fpos, size_t size);
1196 void __init vmalloc_init(void);
1197 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1198 pgprot_t prot, struct page **pages, unsigned int page_shift);
1200 static inline void vmalloc_init(void)
1205 int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
1206 pgprot_t prot, struct page **pages, unsigned int page_shift)
1212 int __must_check __vmap_pages_range_noflush(unsigned long addr,
1213 unsigned long end, pgprot_t prot,
1214 struct page **pages, unsigned int page_shift);
1216 void vunmap_range_noflush(unsigned long start, unsigned long end);
1218 void __vunmap_range_noflush(unsigned long start, unsigned long end);
1220 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
1221 unsigned long addr, int page_nid, int *flags);
1223 void free_zone_device_folio(struct folio *folio);
1224 int migrate_device_coherent_page(struct page *page);
1229 int __must_check try_grab_folio(struct folio *folio, int refs,
1230 unsigned int flags);
1235 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1236 pud_t *pud, bool write);
1237 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1238 pmd_t *pmd, bool write);
1243 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1244 struct vm_area_struct *vma,
1245 unsigned long delta);
1248 /* mark page accessed */
1249 FOLL_TOUCH = 1 << 16,
1250 /* a retry, previous pass started an IO */
1251 FOLL_TRIED = 1 << 17,
1252 /* we are working on non-current tsk/mm */
1253 FOLL_REMOTE = 1 << 18,
1254 /* pages must be released via unpin_user_page */
1255 FOLL_PIN = 1 << 19,
1256 /* gup_fast: prevent fall-back to slow gup */
1257 FOLL_FAST_ONLY = 1 << 20,
1258 /* allow unlocking the mmap lock */
1259 FOLL_UNLOCKABLE = 1 << 21,
1260 /* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
1261 FOLL_MADV_POPULATE = 1 << 22,
1264 #define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
1265 FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
1269 * Indicates whether, for pages that are write-protected in the page table,
1270 * GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
1271 * GUP pin will remain consistent with the pages mapped into the page tables
1272 * of the MM.
1274 * Temporary unmapping of PageAnonExclusive() pages or clearing of
1275 * PageAnonExclusive() has to protect against concurrent GUP:
1276 * * Ordinary GUP: Using the PT lock
1277 * * GUP-fast and fork(): mm->write_protect_seq
1278 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1279 * folio_try_share_anon_rmap_*()
1281 * Must be called with the (sub)page that's actually referenced via the
1282 * page table entry, which might not necessarily be the head page for a
1283 * large folio.
1285 * If the vma is NULL, we're coming from the GUP-fast path and might have
1286 * to fallback to the slow path just to lookup the vma.
1288 static inline bool gup_must_unshare(struct vm_area_struct *vma,
1289 unsigned int flags, struct page *page)
1292 * FOLL_WRITE is implicitly handled correctly as the page table entry
1293 * has to be writable -- and if it references (part of) an anonymous
1294 * folio, that part is required to be marked exclusive.
1296 if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
1299 * Note: PageAnon(page) is stable until the page is actually getting
1300 * freed.
1302 if (!PageAnon(page)) {
1304 * We only care about R/O long-term pinning: R/O short-term
1305 * pinning does not have the semantics to observe successive
1306 * changes through the process page tables.
1308 if (!(flags & FOLL_LONGTERM))
1311 /* We really need the vma ... */
1316 * ... because we only care about writable private ("COW")
1317 * mappings where we have to break COW early.
1319 return is_cow_mapping(vma->vm_flags);
1322 /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
1323 if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
1327 * Note that PageKsm() pages cannot be exclusive, and consequently,
1328 * cannot get pinned.
1330 return !PageAnonExclusive(page);
1333 extern bool mirrored_kernelcore;
1334 extern bool memblock_has_mirror(void);
1336 static __always_inline void vma_set_range(struct vm_area_struct *vma,
1337 unsigned long start, unsigned long end,
1340 vma->vm_start = start;
1342 vma->vm_pgoff = pgoff;
1345 static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
1348 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
1349 * enablements, because when soft-dirty is not compiled in,
1350 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
1351 * will be constantly true.
1353 if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
1357 * Soft-dirty is kind of special: its tracking is enabled when the
1358 * VM_SOFTDIRTY vma flag is not set.
1360 return !(vma->vm_flags & VM_SOFTDIRTY);
1363 static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
1365 return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
1368 static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
1370 return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
1373 static inline void vma_iter_config(struct vma_iterator *vmi,
1374 unsigned long index, unsigned long last)
1376 __mas_set_range(&vmi->mas, index, last - 1);
1379 static inline void vma_iter_reset(struct vma_iterator *vmi)
1381 mas_reset(&vmi->mas);
1385 struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
1387 return mas_prev_range(&vmi->mas, min);
1391 struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
1393 return mas_next_range(&vmi->mas, max);
1396 static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
1397 unsigned long max, unsigned long size)
1399 return mas_empty_area(&vmi->mas, min, max - 1, size);
1402 static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
1403 unsigned long max, unsigned long size)
1405 return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
1409 * VMA Iterator functions shared between nommu and mmap
1411 static inline int vma_iter_prealloc(struct vma_iterator *vmi,
1412 struct vm_area_struct *vma)
1414 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
1417 static inline void vma_iter_clear(struct vma_iterator *vmi)
1419 mas_store_prealloc(&vmi->mas, NULL);
1422 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
1424 return mas_walk(&vmi->mas);
1427 /* Store a VMA with preallocated memory */
1428 static inline void vma_iter_store(struct vma_iterator *vmi,
1429 struct vm_area_struct *vma)
1432 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1433 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1434 vmi->mas.index > vma->vm_start)) {
1435 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
1436 vmi->mas.index, vma->vm_start, vma->vm_start,
1437 vma->vm_end, vmi->mas.index, vmi->mas.last);
1439 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
1440 vmi->mas.last < vma->vm_start)) {
1441 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
1442 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
1443 vmi->mas.index, vmi->mas.last);
1447 if (vmi->mas.status != ma_start &&
1448 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1449 vma_iter_invalidate(vmi);
1451 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1452 mas_store_prealloc(&vmi->mas, vma);
1455 static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
1456 struct vm_area_struct *vma, gfp_t gfp)
1458 if (vmi->mas.status != ma_start &&
1459 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
1460 vma_iter_invalidate(vmi);
1462 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
1463 mas_store_gfp(&vmi->mas, vma, gfp);
1464 if (unlikely(mas_is_err(&vmi->mas)))
1471 * VMA lock generalization
1473 struct vma_prepare {
1474 struct vm_area_struct *vma;
1475 struct vm_area_struct *adj_next;
1477 struct address_space *mapping;
1478 struct anon_vma *anon_vma;
1479 struct vm_area_struct *insert;
1480 struct vm_area_struct *remove;
1481 struct vm_area_struct *remove2;
1484 void __meminit __init_single_page(struct page *page, unsigned long pfn,
1485 unsigned long zone, int nid);
1487 /* shrinker related functions */
1488 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1492 static inline int can_do_mseal(unsigned long flags)
1500 bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1502 bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1503 unsigned long end, int behavior);
1505 static inline int can_do_mseal(unsigned long flags)
1510 static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1516 static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1517 unsigned long end, int behavior)
1523 #ifdef CONFIG_SHRINKER_DEBUG
1524 static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1525 struct shrinker *shrinker, const char *fmt, va_list ap)
1527 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1529 return shrinker->name ? 0 : -ENOMEM;
1532 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1534 kfree_const(shrinker->name);
1535 shrinker->name = NULL;
1538 extern int shrinker_debugfs_add(struct shrinker *shrinker);
1539 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1541 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1543 #else /* CONFIG_SHRINKER_DEBUG */
1544 static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1548 static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1549 const char *fmt, va_list ap)
1553 static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1556 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1562 static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1566 #endif /* CONFIG_SHRINKER_DEBUG */
1568 /* Only track the nodes of mappings with shadow entries */
1569 void workingset_update_node(struct xa_node *node);
1570 extern struct list_lru shadow_nodes;
1572 struct unlink_vma_file_batch {
1574 struct vm_area_struct *vmas[8];
1577 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
1578 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
1579 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);
1581 #endif /* __MM_INTERNAL_H */