linux.git / mm/swap.c (blame at commit "mm/swap.c: serialize memcg changes in pagevec_lru_move_fn")
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
        local_lock_t lock;
        struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following pagevecs are grouped together because they are all protected
 * by disabling preemption (interrupts remain enabled).
 */
struct lru_pvecs {
        local_lock_t lock;
        struct pagevec lru_add;
        struct pagevec lru_deactivate_file;
        struct pagevec lru_deactivate;
        struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
        struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                pg_data_t *pgdat = page_pgdat(page);
                struct lruvec *lruvec;
                unsigned long flags;

                spin_lock_irqsave(&pgdat->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        }
        __ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        mem_cgroup_uncharge(page);
        free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because hugetlb pages never have PageLRU set
         * (they are never added to any LRU list) and no memcg routines
         * should be called for hugetlb (it has a separate hugetlb_cgroup).
         */
        if (!PageHuge(page))
                __page_cache_release(page);
        destroy_compound_page(page);
}

void __put_page(struct page *page)
{
        if (is_zone_device_page(page)) {
                put_dev_pagemap(page->pgmap);

                /*
                 * The page belongs to the device that created pgmap. Do
                 * not return it to the page allocator.
                 */
                return;
        }

        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
                __put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = lru_to_page(pages);
                list_del(&victim->lru);
                put_page(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0.  If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                struct page **pages)
{
        int seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;

                pages[seg] = kmap_to_page(kiov[seg].iov_base);
                get_page(pages[seg]);
        }

        return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must have space for at least one page.
 *
 * Returns 1 if the page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
        const struct kvec kiov = {
                .iov_base = (void *)start,
                .iov_len = PAGE_SIZE
        };

        return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
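
/*
 * Usage sketch (illustrative, not part of mm/swap.c): pinning the single
 * kernel page behind a page-aligned buffer. The helper name and buffer
 * are hypothetical; only get_kernel_page() and put_page() are real.
 */
#if 0
static int pin_one_kernel_page(void *kbuf, struct page **page)
{
        /* kbuf must be page-aligned; get_kernel_page() pins exactly one page */
        int ret = get_kernel_page((unsigned long)kbuf, 0, page);

        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;
        /* ... use *page ..., then drop the reference taken above: */
        put_page(*page);
        return 0;
}
#endif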

static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
        int i;
        struct pglist_data *pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);

                if (pagepgdat != pgdat) {
                        if (pgdat)
                                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
                        pgdat = pagepgdat;
                        spin_lock_irqsave(&pgdat->lru_lock, flags);
                }

                /* block memcg migration during page moving between lru */
                if (!TestClearPageLRU(page))
                        continue;

                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                (*move_fn)(page, lruvec);

                SetPageLRU(page);
        }
        if (pgdat)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}
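
/*
 * Note (editorial): clearing PG_lru up front is what the commit subject
 * ("serialize memcg changes in pagevec_lru_move_fn") refers to. A page
 * whose PG_lru bit is clear cannot be isolated concurrently, so the
 * move_fn callbacks are serialized against memcg page moving without
 * taking any additional lock.
 */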

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
        if (!PageUnevictable(page)) {
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
                __count_vm_events(PGROTATED, thp_nr_pages(page));
        }
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 *
 * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                get_page(page);
                local_lock_irqsave(&lru_rotate.lock, flags);
                pvec = this_cpu_ptr(&lru_rotate.pvec);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
        do {
                unsigned long lrusize;
                struct pglist_data *pgdat = lruvec_pgdat(lruvec);

                spin_lock_irq(&pgdat->lru_lock);
                /* Record cost event */
                if (file)
                        lruvec->file_cost += nr_pages;
                else
                        lruvec->anon_cost += nr_pages;

                /*
                 * Decay previous events
                 *
                 * Because workloads change over time (and to avoid
                 * overflow) we keep these statistics as a floating
                 * average, which ends up weighing recent refaults
                 * more than old ones.
                 */
                lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
                          lruvec_page_state(lruvec, NR_ACTIVE_FILE);

                if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
                spin_unlock_irq(&pgdat->lru_lock);
        } while ((lruvec = parent_lruvec(lruvec)));
}

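/*
 * Worked example (illustrative): with lrusize = 1000 pages, the decay
 * threshold is 1000 / 4 = 250. If file_cost = 200 and anon_cost = 60,
 * their sum (260) exceeds the threshold and both are halved to 100 and
 * 30, so roughly the most recent lrusize/4 cost events dominate the
 * file/anon reclaim balance.
 */
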
void lru_note_cost_page(struct page *page)
{
        lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
                      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec)
{
        if (!PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);

                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
                                     nr_pages);
        }
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
        return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
        page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.activate_page);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page);
                local_unlock(&lru_pvecs.lock);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
        pg_data_t *pgdat = page_pgdat(page);

        page = compound_head(page);
        spin_lock_irq(&pgdat->lru_lock);
        if (PageLRU(page))
                __activate_page(page, mem_cgroup_page_lruvec(page, pgdat));
        spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
        struct pagevec *pvec;
        int i;

        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);

        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
         * the local pagevec is examined as a !PageLRU page could be in the
         * process of being released, reclaimed, migrated or on a remote
         * pagevec that is currently being drained. Furthermore, marking
         * a remote pagevec's page PageActive potentially hits a race where
         * a page is marked PageActive just after it is added to the inactive
         * list, causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
                struct page *pagevec_page = pvec->pages[i];

                if (pagevec_page == page) {
                        SetPageActive(page);
                        break;
                }
        }

        local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
        page = compound_head(page);

        if (!PageReferenced(page)) {
                SetPageReferenced(page);
        } else if (PageUnevictable(page)) {
                /*
                 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
                 * this list is never rotated or maintained, so marking an
                 * unevictable page accessed has no effect.
                 */
        } else if (!PageActive(page)) {
                /*
                 * If the page is on the LRU, queue it for activation via
                 * lru_pvecs.activate_page. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
                workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives the caller of lru_cache_add() a chance to
 * have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
        struct pagevec *pvec;

        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);

        get_page(page);
        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
        local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);
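
/*
 * Usage sketch (illustrative): a typical page-cache instantiation pairs
 * the cache insertion with lru_cache_add(), roughly what
 * add_to_page_cache_lru() in mm/filemap.c does. The helper below is
 * hypothetical.
 */
#if 0
static int add_new_cache_page(struct address_space *mapping, pgoff_t index,
                              gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);
        int err;

        if (!page)
                return -ENOMEM;
        __SetPageLocked(page);
        err = add_to_page_cache_locked(page, mapping, index, gfp);
        if (err) {
                __ClearPageLocked(page);
                put_page(page);
                return err;
        }
        lru_cache_add(page);    /* deferred LRU placement via per-cpu pagevec */
        unlock_page(page);
        return 0;
}
#endif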

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
                                           struct vm_area_struct *vma)
{
        bool unevictable;

        VM_BUG_ON_PAGE(PageLRU(page), page);

        unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
        if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);
                /*
                 * We use the irq-unsafe __mod_zone_page_state because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held (a spinlock), which implies preemption is
                 * disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
                count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
        }
        lru_cache_add(page);
}

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty/writeback, it can be
 * reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
        int lru;
        bool active;
        int nr_pages = thp_nr_pages(page);

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * PG_reclaim can race with end_page_writeback, which
                 * can make readahead confusing.  But the race window
                 * is _really_ small and it's a non-critical problem.
                 */
                add_page_to_lru_list(page, lruvec, lru);
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it was in the pagevec,
                 * so move the page to the tail of the inactive list.
                 */
                add_page_to_lru_list_tail(page, lruvec, lru);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
        if (PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
                ClearPageReferenced(page);
                add_page_to_lru_list(page, lruvec, lru);

                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
        if (PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
                 * Lazyfree pages are clean anonymous pages.  They have the
                 * PG_swapbacked flag cleared, to distinguish them from
                 * normal anonymous pages.
                 */
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
                                     nr_pages);
        }
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);

        pvec = &per_cpu(lru_rotate.pvec, cpu);
        /* Disabling interrupts below acts as a compiler barrier. */
        if (data_race(pagevec_count(pvec))) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_lock_irqsave(&lru_rotate.lock, flags);
                pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }

        pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

        pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn);

        pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

        activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as mprotect),
         * deactivating unevictable pages to accelerate reclaim is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
                local_unlock(&lru_pvecs.lock);
        }
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn);
                local_unlock(&lru_pvecs.lock);
        }
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to lazyfree
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
                local_unlock(&lru_pvecs.lock);
        }
}

void lru_add_drain(void)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
}
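
/*
 * Note (editorial): lru_add_drain() above flushes only the calling CPU's
 * pagevecs and is cheap; lru_add_drain_all() below queues work on every
 * CPU with pending pages and waits for it, so it may sleep and must not
 * be called from atomic context.
 */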

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
        /*
         * lru_drain_gen - Global pages generation number
         *
         * (A) Definition: global lru_drain_gen = x implies that all generations
         *     0 < n <= x are already *scheduled* for draining.
         *
         * This is an optimization for the highly-contended use case where a
         * user space workload keeps constantly generating a flow of pages for
         * each CPU.
         */
        static unsigned int lru_drain_gen;
        static struct cpumask has_work;
        static DEFINE_MUTEX(lock);
        unsigned cpu, this_gen;

        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
         * initialized.
         */
        if (WARN_ON(!mm_percpu_wq))
                return;

        /*
         * Guarantee pagevec counter stores visible by this CPU are visible to
         * other CPUs before loading the current drain generation.
         */
        smp_mb();

        /*
         * (B) Locally cache global LRU draining generation number
         *
         * The read barrier ensures that the counter is loaded before the mutex
         * is taken. It pairs with smp_mb() inside the mutex critical section
         * at (D).
         */
        this_gen = smp_load_acquire(&lru_drain_gen);

        mutex_lock(&lock);

        /*
         * (C) Exit the draining operation if a newer generation, from another
         * lru_add_drain_all(), was already scheduled for draining. Check (A).
         */
        if (unlikely(this_gen != lru_drain_gen))
                goto done;

        /*
         * (D) Increment global generation number
         *
         * Pairs with smp_load_acquire() at (B), outside of the critical
         * section. Use a full memory barrier to guarantee that the new global
         * drain generation number is stored before loading pagevec counters.
         *
         * This pairing must be done here, before the for_each_online_cpu loop
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
         * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
         *
         * If the paired barrier is done at any later step, e.g. after the
         * loop, CPU #x will just exit at (C) and miss flushing out all of its
         * added pages.
         */
        WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
        smp_mb();

        cpumask_clear(&has_work);
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
                    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
        mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
        lru_add_drain();
}
#endif /* CONFIG_SMP */

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If a page's
 * refcount falls to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct pglist_data *locked_pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags;
        unsigned int lock_batch;

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same pgdat. The lock is held only if pgdat != NULL.
                 */
                if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
                        spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                        locked_pgdat = NULL;
                }

                page = compound_head(page);
                if (is_huge_zero_page(page))
                        continue;

                if (is_zone_device_page(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                       flags);
                                locked_pgdat = NULL;
                        }
                        /*
                         * ZONE_DEVICE pages that return 'false' from
                         * page_is_devmap_managed() do not require special
                         * processing, and instead, expect a call to
                         * put_page_testzero().
                         */
                        if (page_is_devmap_managed(page)) {
                                put_devmap_managed_page(page);
                                continue;
                        }
                        if (put_page_testzero(page))
                                put_dev_pagemap(page->pgmap);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageCompound(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                                locked_pgdat = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct pglist_data *pgdat = page_pgdat(page);

                        if (pgdat != locked_pgdat) {
                                if (locked_pgdat)
                                        spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                               flags);
                                lock_batch = 0;
                                locked_pgdat = pgdat;
                                spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
                        }

                        lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
                        VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }

                __ClearPageWaiters(page);

                list_add(&page->lru, &pages_to_free);
        }
        if (locked_pgdat)
                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        if (!pvec->percpu_pvec_drained) {
                lru_add_drain();
                pvec->percpu_pvec_drained = true;
        }
        release_pages(pvec->pages, pagevec_count(pvec));
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
        int nr_pages = thp_nr_pages(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        /*
         * Page becomes evictable in two ways:
         * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
         * 2) Before acquiring LRU lock to put the page to correct LRU and then
         *    a) do PageLRU check with lock [check_move_unevictable_pages]
         *    b) do PageLRU check before lock [clear_page_mlock]
         *
         * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
         * the following strict ordering:
         *
         * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
         *
         * SetPageLRU()				TestClearPageMlocked()
         * smp_mb() // explicit ordering	// above provides strict
         *					// ordering
         * PageMlocked()			PageLRU()
         *
         *
         * if '#1' does not observe setting of PG_lru by '#0' and fails
         * isolation, the explicit barrier will make sure that the
         * page_evictable check will put the page on the correct LRU. Without
         * smp_mb(), SetPageLRU can be reordered after the PageMlocked check,
         * which can make '#1' fail the isolation of the page whose Mlocked
         * bit is cleared (#0 is also looking at the same page), and the
         * evictable page will be stranded on an unevictable LRU.
         */
        SetPageLRU(page);
        smp_mb__after_atomic();

        if (page_evictable(page)) {
                lru = page_lru(page);
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                lru = LRU_UNEVICTABLE;
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        add_page_to_lru_list(page, lruvec, lru);
        trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        int i;
        struct pglist_data *pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);

                if (pagepgdat != pgdat) {
                        if (pgdat)
                                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
                        pgdat = pagepgdat;
                        spin_lock_irqsave(&pgdat->lru_lock, flags);
                }

                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                __pagevec_lru_add_fn(page, lruvec);
        }
        if (pgdat)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * Only one subpage of a Transparent Huge Page is returned in one call:
 * allowing truncate_inode_pages_range() to evict the whole THP without
 * cycling through a pagevec of extra references.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                struct address_space *mapping,
                                pgoff_t start, unsigned nr_entries,
                                pgoff_t *indices)
{
        pvec->nr = find_get_entries(mapping, start, nr_entries,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!xa_is_value(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup_range() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
        pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
                                        pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
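
/*
 * Usage sketch (illustrative): walking every page of a mapping in
 * PAGEVEC_SIZE batches. pagevec_lookup_range() advances @index itself;
 * the walker function below is hypothetical.
 */
#if 0
static void walk_mapping_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range(&pvec, mapping, &index,
                                          (pgoff_t)-1))) {
                for (i = 0; i < nr; i++)
                        ; /* ... inspect pvec.pages[i] ... */
                pagevec_release(&pvec); /* drop the references taken above */
                cond_resched();
        }
}
#endif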

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                                            PAGEVEC_SIZE, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
}
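
/*
 * Note (illustrative): page_cluster is the log2 of the swap readahead
 * window, so the default of 3 reads up to 1 << 3 = 8 pages per cluster;
 * it can be tuned at runtime via /proc/sys/vm/page-cluster.
 */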

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
        int count;

        if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
                return;

        count = page_ref_dec_return(page);

        /*
         * devmap page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
        if (count == 1)
                free_devmap_managed_page(page);
        else if (!count)
                __put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif