/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

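/*
 * Illustrative sketch (not part of this header), with gfp_mask standing in
 * for a caller-supplied mask: internal allocators typically keep only the
 * reclaim and constraint bits of the caller's mask and supply their own
 * placement hints, e.g.
 *
 *	gfp_t local_flags = gfp_mask & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
 *
 *	page = alloc_pages(local_flags | __GFP_HIGHMEM, 0);
 */
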
void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

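/*
 * Illustrative example (not part of this header): with ra->start = 100,
 * ra->size = 32 and ra->async_size = 16, ra_submit() reads pages 100-131
 * and places the read-ahead marker 16 pages before the end of the window,
 * so that reaching it starts the next window asynchronously:
 *
 *	ra_submit(ra, mapping, filp);
 *	   == __do_page_cache_readahead(mapping, filp, 100, 32, 16);
 */
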
/*
 * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

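/*
 * Illustrative sketch (not part of this header): pages leave the buddy
 * allocator with _refcount == 0; the allocator's post-allocation path calls
 *
 *	set_page_refcounted(page);
 *
 * so _refcount becomes 1 and the eventual put_page() frees the page again.
 */
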
extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

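/*
 * Illustrative sketch (not part of this header), roughly how the allocator
 * fast path fills this in before passing it around (the local names
 * gfp_mask, preferred_nid and nodemask are assumed from the caller):
 *
 *	struct alloc_context ac = { };
 *
 *	ac.high_zoneidx = gfp_zone(gfp_mask);
 *	ac.zonelist = node_zonelist(preferred_nid, gfp_mask);
 *	ac.nodemask = nodemask;
 *	ac.migratetype = gfpflags_to_migratetype(gfp_mask);
 */
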
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

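/*
 * Illustrative worked example (not part of this header): for pfn 10 at
 * order 1, the buddy is 10 ^ (1 << 1) = 8 and the merged order-2 parent
 * starts at pfn 10 & ~(1 << 1) = 8:
 *
 *	buddy_pfn  = __find_buddy_pfn(10, 1);		computes 8
 *	parent_pfn = buddy_pfn & ~(1UL << 1);		also 8
 */
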
extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned int fast_search_fail;	/* failures to use free list searches */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
};

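/*
 * Illustrative sketch (not part of this header): the migrate and free
 * scanners walk towards each other, and a compaction run is complete once
 * they meet, conceptually
 *
 *	while (cc->migrate_pfn < cc->free_pfn) {
 *		isolate_migratepages(cc->zone, cc);
 *		migrate_pages(&cc->migratepages, compaction_alloc, ...);
 *	}
 */
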
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))

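/*
 * Illustrative sketch (not part of this header) of the lockless pattern
 * described above, as used by pfn scanners:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = page_order_unsafe(page);
 *
 *		if (order < MAX_ORDER)
 *			low_pfn += (1UL << order) - 1;
 *	}
 *
 * The range check guards against a racing allocation having clobbered the
 * value read by page_order_unsafe().
 */
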
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

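/*
 * Illustrative examples (not part of this header) of how typical mappings
 * classify:
 *
 *	is_exec_mapping(VM_EXEC | VM_READ | VM_MAYWRITE)    == true  (text segment)
 *	is_stack_mapping(VM_STACK | VM_READ | VM_WRITE)     == true  (process stack)
 *	is_data_mapping(VM_READ | VM_WRITE | VM_MAYWRITE)   == true  (private data/heap)
 */
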
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}

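/*
 * Illustrative worked example (not part of this header), assuming 4 KiB
 * pages: for a vma with vm_start = 0x7f0000000000 and vm_pgoff = 0, a page
 * whose page_to_pgoff() is 3 is expected at
 *
 *	__vma_address(page, vma) == 0x7f0000000000 + (3 << PAGE_SHIFT)
 *				 == 0x7f0000003000
 */
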
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

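/*
 * Illustrative sketch (not part of this header) of the intended iteration
 * pattern over a gigantic page, where nr_subpages stands for the caller's
 * page count:
 *
 *	struct page *p = mem_map_offset(base, 0);
 *
 *	for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, base, i)) {
 *		... operate on subpage p ...
 *	}
 */
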
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

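/*
 * Illustrative usage (not part of this header), similar in spirit to what the
 * memmap initialisation code prints when mminit_loglevel is raised (the local
 * names nid, zidx, start_pfn and end_pfn are assumed from the caller):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zidx, start_pfn, end_pfn);
 */
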
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */

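/*
 * Illustrative sketch (not part of this header): the allocator fast path
 * typically checks the low watermark while still allowing kswapd to be
 * woken, e.g.
 *
 *	unsigned int alloc_flags = ALLOC_WMARK_LOW | ALLOC_KSWAPD;
 *
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */
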
enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif	/* __MM_INTERNAL_H */