/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	/*
	 * If we're getting a tail page, the elevated page->_count is
	 * required only in the head page, so we elevate the head
	 * page->_count and the tail page->_mapcount.
	 *
	 * We elevate page_tail->_mapcount for tail pages to force
	 * page_tail->_count to be zero at all times to avoid getting
	 * false positives from get_page_unless_zero() with
	 * speculative page access (like in
	 * page_cache_get_speculative()) on tail pages.
	 */
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page(); it must be called while holding the proper PT lock,
 * while the pte (or pmd_trans_huge) is still mapping the page.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll(), because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}
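
/*
 * Illustrative usage sketch (not part of the original header): a
 * follow_page()-style caller takes its FOLL_GET reference while the
 * PT lock still pins the mapping, roughly as below.  This is a hedged
 * example; the real logic lives in mm/memory.c.
 *
 *	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	page = vm_normal_page(vma, address, *ptep);
 *	if (page && (flags & FOLL_GET))
 *		get_page_foll(page);
 *	pte_unmap_unlock(ptep, ptl);
 */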

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c:
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c:
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction.  The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start.  Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
};
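
/*
 * Illustrative sketch (hedged, not the authoritative setup code): a
 * compaction run such as compact_zone() seeds the two scanners at
 * opposite ends of the zone, roughly like this, and finishes once the
 * pfns meet:
 *
 *	struct compact_control cc = {
 *		.zone		= zone,
 *		.order		= order,
 *		.sync		= false,
 *		.migrate_pfn	= zone->zone_start_pfn,
 *		.free_pfn	= zone->zone_start_pfn + zone->spanned_pages,
 *	};
 *	INIT_LIST_HEAD(&cc.freepages);
 *	INIT_LIST_HEAD(&cc.migratepages);
 */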

unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

#endif

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
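
/*
 * Illustrative sketch of the intended calling pattern, with zone->lock
 * held and PageBuddy() checked first (hedged example only):
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */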

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, via page_evictable(), for a new page,
 * to determine if it's being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				    struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
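
/*
 * Illustrative sketch (hedged; see page_evictable() in mm/vmscan.c for
 * the real logic): the fault path's evictability check looks roughly
 * like this:
 *
 *	if (mapping_unevictable(page_mapping(page)))
 *		return 0;
 *	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
 *		return 0;
 *	return 1;
 */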

/*
 * Must be called with the vma's mmap_sem held for read or write,
 * and the page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}
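
/*
 * Illustrative sketch: a truncation path can call this unconditionally
 * before dropping the page from the pagecache (hedged example; the real
 * callers live in mm/truncate.c):
 *
 *	clear_page_mlock(page);
 *	ClearPageMappedToDisk(page);
 *	delete_from_page_cache(page);
 */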

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
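
/*
 * Illustrative sketch: walking every subpage of a gigantic page with
 * the helpers above, in the style of the hugetlb copy/clear loops
 * (hedged example; process_subpage() is a hypothetical callback):
 *
 *	struct page *p = page;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i))
 *		process_subpage(p);
 */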

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases.  SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
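
/*
 * Illustrative sketch: paging-init-time functions carry the annotation,
 * e.g. (hedged example; the signature is approximate):
 *
 *	static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 *			unsigned long *zones_size, unsigned long *zholes_size);
 */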

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
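
/*
 * Illustrative invocation sketch (hedged; mirrors the style of real
 * callers in mm/, but the exact strings here are illustrative):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone_id, start_pfn, end_pfn);
 */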

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1
#endif /* __MM_INTERNAL_H */

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);