/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never put on any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

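/*
 * Illustrative sketch, not part of the original file: a caller in the
 * style of read_cache_pages() walks a page->lru-threaded list and, on
 * error, hands everything still queued back in one call:
 *
 *	// pages: list_head of pages threaded on page->lru
 *	if (error)
 *		put_pages_list(pages);	// drops one reference per page
 */
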
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have room for at least one page pointer.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

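/*
 * Illustrative sketch, not part of the original file: pinning the page
 * behind a page-aligned kernel buffer before handing it to code that
 * works on struct page.  "buf" is a hypothetical kernel address.
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		// ... operate on page ...
 *		put_page(page);		// drop the pinned reference
 *	}
 */
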
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

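/*
 * Worked example of the transitions above (illustrative, not part of
 * the original file): a freshly added page cache page starts out
 * inactive,unreferenced, so it takes two touches to reach the active
 * list:
 *
 *	mark_page_accessed(page); // inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page); // inactive,referenced -> active,unreferenced
 *
 * This is why used-once pages (e.g. from streaming reads) stay on the
 * inactive list and are reclaimed first.
 */
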
static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add an anonymous page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

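/*
 * Illustrative sketch, not part of the original file: the common
 * pattern for a new page cache page, essentially what
 * add_to_page_cache_lru() in mm/filemap.c does:
 *
 *	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 *	if (!ret)
 *		lru_cache_add(page);	// page is not yet on any LRU
 */
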
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list, it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (spinlock), which implies preemption disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by a flusher thread, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback, which can
		 * confuse readahead.  But the race window is _really_ small
		 * and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was on the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGDEACTIVATE);
		update_page_reclaim_stat(lruvec, file, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. mlock'ed pages),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

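/*
 * Illustrative sketch, not part of the original file: this mirrors how
 * invalidate_mapping_pages() in mm/truncate.c uses the hint when a
 * page cannot be invalidated (e.g. it is dirty or under writeback):
 *
 *	ret = invalidate_inode_page(page);
 *	unlock_page(page);
 *	if (!ret)
 *		deactivate_file_page(page);	// failed: hint for reclaim
 */
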
/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

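/*
 * Illustrative note, not part of the original file: callers such as
 * the mlock path and memory offlining drain all CPUs before walking
 * the LRU lists, so recently added pages are really on the lists:
 *
 *	lru_add_drain_all();	// flush every CPU's pagevecs
 *	// ... isolate or scan pages on the LRU lists ...
 */
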
/**
 * release_pages - batched page_cache_release()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone. The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
								flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
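
/*
 * Illustrative sketch, not part of the original file: dropping the
 * references taken by a gang lookup in one batched call instead of a
 * page_cache_release() per page:
 *
 *	struct page *pages[PAGEVEC_SIZE];
 *	int nr = find_get_pages(mapping, start, PAGEVEC_SIZE, pages);
 *
 *	// ... examine the pages ...
 *	release_pages(pages, nr, false);	// false: not cache cold
 */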

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_pages:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

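/*
 * Illustrative sketch, not part of the original file: the pattern used
 * by the truncate/invalidate code in mm/truncate.c - look pages and
 * shadow entries up together, deal with the exceptional entries, then
 * prune them before any page-only processing:
 *
 *	pgoff_t indices[PAGEVEC_SIZE];
 *
 *	while (pagevec_lookup_entries(&pvec, mapping, index,
 *				      PAGEVEC_SIZE, indices)) {
 *		// ... handle shadow entries via indices[] ...
 *		pagevec_remove_exceptionals(&pvec);
 *		// ... page-only work on pvec.pages[] ...
 *		pagevec_release(&pvec);
 *	}
 */
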
/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

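/*
 * Illustrative sketch, not part of the original file: the common
 * iteration pattern over a mapping, one pagevec-full at a time:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	int i;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		for (i = 0; i < pagevec_count(&pvec); i++) {
 *			struct page *page = pvec.pages[i];
 *
 *			index = page->index + 1;
 *			// ... examine page ...
 *		}
 *		pagevec_release(&pvec);	// drop the gang-lookup references
 *	}
 */
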
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++)
		spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
}
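
/*
 * Worked example, not part of the original file: with 4KiB pages a
 * 1GiB machine has totalram_pages == 262144, so
 * megs = 262144 >> (20 - 12) = 1024.  Since megs >= 16, page_cluster
 * is set to 3, i.e. swap I/O clusters 1 << 3 = 8 pages at a time.
 */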