/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/align.h>
#include <linux/types.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
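
/*
 * Illustrative sketch, not part of the original header: how these counter
 * indices are typically consumed.  get_mm_counter() is provided by
 * <linux/mm.h>; the helper function name below is made up for the example.
 */
#if 0	/* example only */
#include <linux/mm.h>

static unsigned long example_resident_pages(struct mm_struct *mm)
{
	/* RSS is the sum of the resident page counters (swap entries excluded). */
	return get_mm_counter(mm, MM_FILEPAGES) +
	       get_mm_counter(mm, MM_ANONPAGES) +
	       get_mm_counter(mm, MM_SHMEMPAGES);
}
#endif
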
struct page;

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
struct page_frag_cache {
	/* encoded_page consists of the virtual address, pfmemalloc bit and
	 * order of a page.
	 */
	unsigned long encoded_page;

	/* we maintain a pagecount bias, so that we don't dirty the cache line
	 * containing page->_refcount every time we allocate a fragment.
	 */
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
	__u16 offset;
	__u16 pagecnt_bias;
#else
	__u32 offset;
	__u32 pagecnt_bias;
#endif
};
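
/*
 * Illustrative sketch, not part of the original header: a typical consumer of
 * the cache.  page_frag_alloc() and page_frag_free() are the allocator entry
 * points; the exact header providing them (and any init/drain helpers) varies
 * between kernel versions, so treat the details below as assumptions.
 */
#if 0	/* example only */
static struct page_frag_cache example_frag_cache;

static void *example_alloc_frag(void)
{
	/* Carve a small fragment out of the cached page; the pagecount bias
	 * amortises refcount updates across many such allocations. */
	return page_frag_alloc(&example_frag_cache, 256, GFP_ATOMIC);
}

static void example_free_frag(void *data)
{
	page_frag_free(data);	/* drops one reference on the backing page */
}
#endif
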
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
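
/*
 * Illustrative sketch, not part of the original header: the ordering promised
 * by the comment above, loosely modelled on how mm/rmap.c batches flushes
 * during unmap.  The argument list of arch_tlbbatch_add_pending() differs
 * between architectures and kernel versions, so this is an assumption-laden
 * outline rather than the real call sites.
 */
#if 0	/* example only */
static void example_queue_unmap(struct tlbflush_unmap_batch *tlb_ubc,
				struct mm_struct *mm, unsigned long uaddr,
				pte_t *ptep)
{
	/* 1) Modify the PTE. */
	pte_t pteval = ptep_get_and_clear(mm, uaddr, ptep);

	/* 2) Queue the stale entry; the arch helper supplies all barriers. */
	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;
	if (pte_dirty(pteval))
		tlb_ubc->writable = true;
}

static void example_flush_batch(struct tlbflush_unmap_batch *tlb_ubc)
{
	/* 3) Flush: on return the queued entries are gone on all CPUs. */
	if (tlb_ubc->flush_required)
		arch_tlbbatch_flush(&tlb_ubc->arch);
}
#endif
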
#endif /* _LINUX_MM_TYPES_TASK_H */