/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
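
/*
 * Illustrative only (not a definition from this header): a hypothetical
 * caller would typically wrap an allocator sanity check so that
 * __GFP_NOWARN requests stay silent, e.g.:
 *
 *	if (WARN_ON_ONCE_GFP(order > SOME_MAX_ORDER, gfp))
 *		return NULL;
 *
 * The warning fires at most once, and only when gfp lacks __GFP_NOWARN.
 */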

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount.  Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore()
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we reached the end of the folio. In
		 * corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
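
/*
 * Illustrative only: a sketch (not a definition from this header) of how a
 * page-table walker might consume folio_pte_batch() to handle all PTEs of a
 * large folio in one pass; the surrounding locals are hypothetical.
 *
 *	const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	int nr = 1;
 *
 *	if (folio_test_large(folio))
 *		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *				     flags, NULL, NULL, NULL);
 *	// then advance by "nr" pages/PTEs instead of one at a time
 */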

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}

/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t *ptep = start_ptep + 1;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte)));

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;

		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
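
/*
 * Illustrative only: a hypothetical zap/unmap style loop could use
 * swap_pte_batch() to drop a run of consecutive swap entries at once:
 *
 *	if (is_swap_pte(ptent) && !non_swap_entry(pte_to_swp_entry(ptent)))
 *		nr = swap_pte_batch(pte, max_nr, ptent);
 *	// clear "nr" PTEs and release "nr" consecutive swap slots together
 */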
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
		      struct vm_area_struct *vma,
		      unsigned long addr, unsigned long end,
		      struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
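
/*
 * Worked example (illustrative): for pfn 8 at order 1, the buddy is
 * 8 ^ (1 << 1) = 10, and merging the pair yields the order-2 parent at
 * 8 & ~(1 << 1) = 8; starting from pfn 10 instead, 10 & ~2 = 8 gives the
 * same parent.
 */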

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order
 * may not be the same as @page's. The validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
				unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

void __folio_undo_large_rmappable(struct folio *folio);
static inline void folio_undo_large_rmappable(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return;

	__folio_undo_large_rmappable(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);

extern void post_alloc_hook(struct page *page, unsigned int order,
			    gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

void free_unref_page(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
				 unsigned long start, unsigned long end,
				 int migratetype);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			    unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range.
 * "fully mapped" means all the pages of the folio are associated with the
 * page table of the range, while this function only checks whether the folio
 * range lies within [start, end). The caller needs to do the page table
 * check itself if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the
 * VMA's page table and that the range [start, end) intersects the VMA range.
 * To find out whether the folio is fully associated with the range, it calls
 * this function first to check whether the folio lies in the range, and then
 * checks the page table to see whether the folio is fully mapped to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				   struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
				     struct vm_area_struct *vma)
{
	/*
	 * Always munlock when this function is called. Ideally, we should
	 * only munlock if a page of the folio is being unmapped from the VMA,
	 * leaving the folio not fully mapped to the VMA.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and page reclaim will correct it
	 * if it's wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
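
/*
 * Worked example (illustrative): with vma->vm_start == 0x100000,
 * vma->vm_pgoff == 16 and a 4kB PAGE_SIZE, a folio at pgoff 20 maps at
 * 0x100000 + ((20 - 16) << PAGE_SHIFT) == 0x104000, provided that address
 * still lies below vma->vm_end; otherwise -EFAULT is returned.
 */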

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
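
/*
 * Illustrative only: fault handlers that may block on I/O typically call
 * maybe_unlock_mmap_for_io() before sleeping and, if it returned a pinned
 * file, report VM_FAULT_RETRY; a hypothetical sketch:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	// ... start readahead or wait for the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */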
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					   struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
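
/*
 * Illustrative arithmetic only, derived from the comments above: with a min
 * watermark of 1000 pages, an ALLOC_MIN_RESERVE (__GFP_HIGH) request may dip
 * into 50% of the reserve (down to an effective watermark of 500 pages), and
 * a non-blocking __GFP_HIGH request (ALLOC_NON_BLOCK as well) into 62.5% of
 * it (down to 375 pages).
 */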

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags pagetype_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * mm/mmap.c
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pining: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that PageKsm() pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when without soft-dirty being compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, then !(vm_flags & VM_SOFTDIRTY)
	 * will be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * vma flag is not set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
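
/*
 * Illustrative only: a hypothetical update sequence would preallocate
 * maple-tree nodes before taking locks and then store the VMA, roughly:
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	// ... adjust the vma under the proper locks ...
 *	vma_iter_store(&vmi, vma);
 */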
440703e0
LH
1469
1470/*
1471 * VMA lock generalization
1472 */
1473struct vma_prepare {
1474 struct vm_area_struct *vma;
1475 struct vm_area_struct *adj_next;
1476 struct file *file;
1477 struct address_space *mapping;
1478 struct anon_vma *anon_vma;
1479 struct vm_area_struct *insert;
1480 struct vm_area_struct *remove;
1481 struct vm_area_struct *remove2;
1482};
3ee0aa9f 1483
fde1c4ec
UA
1484void __meminit __init_single_page(struct page *page, unsigned long pfn,
1485 unsigned long zone, int nid);
1486
3ee0aa9f 1487/* shrinker related functions */
96f7b2b9
QZ
1488unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
1489 int priority);
3ee0aa9f 1490
8be7258a 1491#ifdef CONFIG_64BIT
8be7258a
JX
1492static inline int can_do_mseal(unsigned long flags)
1493{
1494 if (flags)
1495 return -EINVAL;
1496
1497 return 0;
1498}
1499
1500bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1501 unsigned long end);
1502bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1503 unsigned long end, int behavior);
1504#else
1505static inline int can_do_mseal(unsigned long flags)
1506{
1507 return -EPERM;
1508}
1509
1510static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start,
1511 unsigned long end)
1512{
1513 return true;
1514}
1515
1516static inline bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start,
1517 unsigned long end, int behavior)
1518{
1519 return true;
1520}
1521#endif
1522
3ee0aa9f 1523#ifdef CONFIG_SHRINKER_DEBUG
f04eba13
LM
1524static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
1525 struct shrinker *shrinker, const char *fmt, va_list ap)
c42d50ae
QZ
1526{
1527 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
1528
1529 return shrinker->name ? 0 : -ENOMEM;
1530}
1531
1532static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1533{
1534 kfree_const(shrinker->name);
1535 shrinker->name = NULL;
1536}
1537
3ee0aa9f
QZ
1538extern int shrinker_debugfs_add(struct shrinker *shrinker);
1539extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1540 int *debugfs_id);
1541extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1542 int debugfs_id);
1543#else /* CONFIG_SHRINKER_DEBUG */
1544static inline int shrinker_debugfs_add(struct shrinker *shrinker)
1545{
1546 return 0;
1547}
c42d50ae
QZ
1548static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
1549 const char *fmt, va_list ap)
1550{
1551 return 0;
1552}
1553static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
1554{
1555}
3ee0aa9f
QZ
1556static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
1557 int *debugfs_id)
1558{
1559 *debugfs_id = -1;
1560 return NULL;
1561}
1562static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
1563 int debugfs_id)
1564{
1565}
1566#endif /* CONFIG_SHRINKER_DEBUG */
1567
b64e74e9
CH
1568/* Only track the nodes of mappings with shadow entries */
1569void workingset_update_node(struct xa_node *node);
1570extern struct list_lru shadow_nodes;
1571
3577dbb1
MG
1572struct unlink_vma_file_batch {
1573 int count;
1574 struct vm_area_struct *vmas[8];
1575};
1576
1577void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
1578void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
1579void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);
1580
db971418 1581#endif /* __MM_INTERNAL_H */