/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved in general does not hinder anybody from dumping or swapping;
 * it is no longer required for remap_pfn_range(), though ioremap might
 * require it. Consequently, PG_reserved for a page mapped into user space
 * can indicate the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * lock to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */
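
/*
 * Illustrative sketch (not part of this header): callers are expected to go
 * through the wrappers generated below instead of poking at page->flags.
 * A hypothetical read-completion path, using only helpers declared here or
 * in linux/pagemap.h, might look like:
 *
 *	static void my_read_end_io(struct folio *folio, int err)
 *	{
 *		if (!err)
 *			folio_mark_uptodate(folio);	// sets PG_uptodate
 *		folio_unlock(folio);	// clears PG_locked, wakes waiters
 *	}
 */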

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
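
/*
 * Worked example (illustrative; the real NR_PAGEFLAGS is generated into
 * <generated/bounds.h> and varies by configuration): if NR_PAGEFLAGS were
 * 24, PAGEFLAGS_MASK would be (1UL << 24) - 1 == 0x00ffffff, i.e. the low
 * bits holding flags as opposed to the zone/node/section fields at the top
 * of the word:
 *
 *	unsigned long only_flags = page->flags & PAGEFLAGS_MASK;
 */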

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page.  The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of @page[1], which avoids touching a (possibly)
	 * cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the fields of @page[1] when PG_head is
		 * set, because @page is then a compound page composed of at
		 * least two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
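
/*
 * Example (illustrative): a tail page's compound_head field holds the head
 * page's address with bit 0 set as a "this is a tail page" tag, so
 *
 *	struct page *head = compound_head(some_page);
 *
 * strips the tag for tail pages and, modulo the fake-head fixup above, is a
 * no-op for head and order-0 pages. See set_compound_head() further down
 * for the encoding side.
 */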

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @page.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
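
/*
 * Usage sketch (illustrative): page_folio() and folio_page() convert
 * between the two views of the same memory:
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *first = folio_page(folio, 0);	// the head page
 *
 * For an order-0 folio, "first" is simply "page" itself.
 */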

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })
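
/*
 * Example (illustrative): under the PF_HEAD policy an operation on a tail
 * page is transparently redirected to its head page, so both calls below
 * touch the same bit:
 *
 *	SetPageDirty(head);
 *	SetPageDirty(tail);	// resolves to compound_head(tail)
 *
 * whereas a PF_NO_COMPOUND flag would trip VM_BUG_ON_PGFLAGS() (with
 * CONFIG_DEBUG_VM_PGFLAGS) if used on any compound page.
 */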

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)
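
/*
 * Expansion example (illustrative): FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
 * used below expands to roughly:
 *
 *	static __always_inline bool folio_test_referenced(const struct folio *folio)
 *	{ return test_bit(PG_referenced, const_folio_flags(folio, 0)); }
 *	static __always_inline void folio_set_referenced(struct folio *folio)
 *	{ set_bit(PG_referenced, folio_flags(folio, 0)); }
 *	static __always_inline void folio_clear_referenced(struct folio *folio)
 *	{ clear_bit(PG_referenced, folio_flags(folio, 0)); }
 */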

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
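
/*
 * Expansion example (illustrative): PAGEFLAG(Dirty, dirty, PF_HEAD) below
 * therefore produces six accessors in one go:
 *
 *	folio_test_dirty(folio)		PageDirty(page)
 *	folio_set_dirty(folio)		SetPageDirty(page)
 *	folio_clear_dirty(folio)	ClearPageDirty(page)
 *
 * with the page variants resolving tail pages through the PF_HEAD policy
 * before touching page->flags.
 */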

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
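
/*
 * Illustrative note: the _FALSE/_NOOP variants are the config-off stubs.
 * For example, on !CONFIG_HIGHMEM builds, PAGEFLAG_FALSE(HighMem, highmem)
 * below makes PageHighMem(page) a constant 0, so the compiler can discard
 * code such as:
 *
 *	if (PageHighMem(page))
 *		do_highmem_fixup(page);	// hypothetical caller, elided entirely
 */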

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
	TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; folio->mapping then points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the folio->mapping does not exist as such, nor do
 * these flags below.  So in order to avoid testing non-existent bits,
 * please make sure that folio_test_slab(folio) actually evaluates to
 * false before calling the following functions (e.g., folio_test_anon).
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Unlike the flags above, this flag is used only for fsdax mode.  It
 * indicates that this page->mapping is now under reflink case.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
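
/*
 * Usage sketch (illustrative): an unlock path can clear PG_locked and learn
 * in the same atomic operation whether anybody needs waking, roughly:
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		wake_up_folio_bit(folio, PG_locked);	// assumed wakeup helper
 *
 * This works because PG_waiters (bit 7) lives in the same byte as PG_locked.
 */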

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
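
/*
 * Barrier pairing, illustrated (sketch, not kernel code):
 *
 *	// writer
 *	memcpy(folio_address(folio), src, len);	// bring the folio uptodate
 *	folio_mark_uptodate(folio);		// smp_wmb(), then set_bit()
 *
 *	// reader
 *	if (folio_test_uptodate(folio))		// test_bit(), then smp_rmb()
 *		use(folio_address(folio));	// data reads ordered after flag read
 *
 * folio_address() is declared in linux/mm.h; use() is a stand-in.
 */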

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that are never mapped to userspace,
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of _mapcount won't be
 * mistaken for a page type value.
 */

enum pagetype {
	PG_buddy	= 0x40000000,
	PG_offline	= 0x20000000,
	PG_table	= 0x10000000,
	PG_guard	= 0x08000000,
	PG_hugetlb	= 0x04000000,
	PG_slab		= 0x02000000,
	PG_zsmalloc	= 0x01000000,

	PAGE_TYPE_BASE	= 0x80000000,

	/*
	 * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
	 * allow owners that set a type to reuse the lower 16 bit for their own
	 * purposes.
	 */
	PAGE_MAPCOUNT_RESERVE	= ~0x0000ffff,
};

#define PageType(page, flag)						\
	((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
	return page_type_has_type(READ_ONCE(page->page_type));
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
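
/*
 * Worked example (illustrative): page_type starts life as -1 (0xffffffff,
 * "no type"). Setting the buddy type, via the ops generated just below,
 * clears PG_buddy's bit:
 *
 *	__SetPageBuddy(page);	// page_type: 0xffffffff -> 0xbfffffff
 *
 * PageBuddy() then checks that PAGE_TYPE_BASE is still set while PG_buddy
 * is clear:
 *
 *	(0xbfffffff & (0x80000000 | 0x40000000)) == 0x80000000	// true
 *
 * A mapped page, whose _mapcount overlays this field with small non-negative
 * values, can never pass that test.
 */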

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline(). generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free (unmanaged) pages, allowing the containing memory block to get
 * offlined. A driver that relies on this feature is aware that re-onlining
 * the memory block will require not giving them to the buddy via
 * generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * the hardware poison handling.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page.
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_active	|				\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)
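
/*
 * Illustrative sketch (the real check lives in the page allocator): the
 * free path can reject a bad page with a single mask test, e.g.:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		report_bad_page(page);	// hypothetical reporting helper
 */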

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND

#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */