// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
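
/*
 * Worked example of the packing above (values made up for illustration,
 * and assuming the common PAGE_SHIFT of 12, so SWAP_RA_WIN_SHIFT == 6):
 * bits 0-5 hold the hit count, bits 6-11 the readahead window, and
 * bits 12 and up the page-aligned fault address, e.g.
 *
 *      SWAP_RA_VAL(0x7f1234567000UL, 8, 3)
 *              == 0x7f1234567000 | (8 << 6) | 3 == 0x7f1234567203
 *
 * SWAP_RA_ADDR(), SWAP_RA_WIN() and SWAP_RA_HITS() recover the three
 * fields from the packed value stored in vma->swap_readahead_info.
 */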

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(folio_page(folio, i), entry.val + i);
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}
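
/*
 * For a large folio the loop above fills one XArray slot per subpage.
 * As an illustration (values chosen arbitrarily): an order-2 folio added
 * at swap offset 0x100 occupies indices 0x100-0x103 of the address_space
 * returned by swap_address_space(entry), and page_private() of subpage i
 * is set to entry.val + i, so each subpage records its own swap slot.
 */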

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

        entry = folio_alloc_swap(folio);
        if (!entry.val)
                return false;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the folio will be dirtied in unmap because its
         * pte should be dirty. A special case is MADV_FREE page. The
         * page's pte could have dirty bit cleared but the folio's
         * SwapBacked flag is still set because clearing the dirty bit
         * and SwapBacked flag has no lock protected. For such folio,
         * unmap will not set dirty bit for it, so folio reclaim will
         * not write the folio out. This can cause data corruption when
         * the folio is swapped in later. Always setting the dirty flag
         * for the folio solves the problem.
         */
        folio_mark_dirty(folio);

        return true;

fail:
        put_swap_folio(folio, entry);
        return false;
}
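
/*
 * Rough sketch of the reclaim-side caller (simplified; the real code in
 * mm/vmscan.c:shrink_folio_list() has many more checks than shown here):
 *
 *      if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
 *          !folio_test_swapcache(folio)) {
 *              if (!add_to_swap(folio))
 *                      goto activate_locked;   // no swap slot, keep in RAM
 *      }
 *      // ... later, pageout() writes the folio via swap_writepage(),
 *      // reached through the swap_aops declared at the top of this file.
 */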

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio_swap_entry(folio);
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}
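
/*
 * The shift/increment/shift sequence above jumps to the first offset of
 * the next swap address_space.  Illustration with made-up numbers,
 * assuming SWAP_ADDRESS_SPACE_SHIFT == 14 (16k slots per address_space):
 * for begin = 0x3000 and end = 0x9000 the loop runs with curr = 0x3000,
 * then 0x4000, then 0x8000, clearing shadows in [0x3000, 0x3fff],
 * [0x4000, 0x7fff] and [0x8000, 0x9000] respectively, and stops once
 * curr advances to 0xc000, which is past end.
 */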

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * Its ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
void free_swap_cache(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        lru_add_drain();
        for (int i = 0; i < nr; i++)
                free_swap_cache(encoded_page_ptr(pages[i]));
        release_pages(pages, nr);
}

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
        if (folio) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = __filemap_get_folio(mapping, index, FGP_ENTRY, 0);

        if (!xa_is_value(folio))
                goto out;
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return NULL;
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
out:
        return folio;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct folio *folio;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                folio = filemap_get_folio(swap_address_space(entry),
                                                swp_offset(entry));
                put_swap_device(si);
                if (folio)
                        return folio_file_page(folio, swp_offset(entry));

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
                if (!folio)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                folio_put(folio);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared. Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);

        if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        mem_cgroup_swapin_uncharge_swap(entry);

        if (shadow)
                workingset_refault(folio, shadow);

        /* Caller will initiate read into locked folio */
        folio_add_lru(folio);
        *new_page_allocated = true;
        return &folio->page;

fail_unlock:
        put_swap_folio(folio, entry);
        folio_unlock(folio);
        folio_put(folio);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, bool do_poll,
                                   struct swap_iocb **plug)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll, plug);

        return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
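
/*
 * Example of the heuristic, with made-up numbers: with 5 recent hits,
 * pages = 5 + 2 = 7, which is rounded up to the next power of two, 8;
 * with page_cluster at its usual default of 3 that is then clamped to
 * max_pages = 1 << 3 = 8.  With no hits and an offset that is not
 * adjacent to prev_offset the window collapses to 1 (no readahead),
 * unless half the previous window (prev_win / 2) keeps it larger.
 */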

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}
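
/*
 * Illustration of the cluster maths above, with made-up numbers: a fault
 * on swap offset 0x1234 with a window of 8 gives mask = 7, so the loop
 * reads offsets 0x1230-0x1237 - the aligned block containing the
 * faulting slot - while skipping offset 0 (the swap header) and anything
 * past the end of the swap device.
 */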

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1)
                return;

        /* Copy the PTEs because the page table may be unmapped */
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        if (fpfn == pfn + 1) {
                lpfn = fpfn;
                rpfn = fpfn + win;
        } else if (pfn == fpfn + 1) {
                lpfn = fpfn - win + 1;
                rpfn = fpfn + 1;
        } else {
                unsigned int left = (win - 1) / 2;

                lpfn = fpfn - left;
                rpfn = fpfn + win - left;
        }
        start = max3(lpfn, PFN_DOWN(vma->vm_start),
                     PFN_DOWN(faddr & PMD_MASK));
        end = min3(rpfn, PFN_DOWN(vma->vm_end),
                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}
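
/*
 * Example of how the window is placed, with made-up numbers and a window
 * of 8: a fault at virtual pfn 100 directly after a previous fault at 99
 * is treated as a forward scan and gives [100, 108); a fault at 100
 * after a previous fault at 101 is a backward scan and gives [93, 101);
 * anything else counts as random and the window is roughly centred on
 * the fault, [97, 105).  The result is then clipped to the VMA and to
 * the PMD that vmf->address falls in, since only that one page table
 * page is mapped and copied above.
 */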

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's a main entry function for swap readahead. By the configuration,
 * it will read ahead blocks by cluster-based(ie, physical disk based)
 * or vma-based(ie, virtual address based on faulty address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}
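
/*
 * Typical caller, as a simplified sketch (the real do_swap_page() in
 * mm/memory.c does considerably more bookkeeping around this):
 *
 *      entry = pte_to_swp_entry(vmf->orig_pte);
 *      page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
 *      if (!page)
 *              // either allocation failed or the entry was freed meanwhile
 *
 * Which readahead flavour runs underneath is transparent to the caller.
 */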

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif
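
/*
 * The sysfs group above exposes the VMA-readahead toggle; with mm_kobj
 * at its usual /sys/kernel/mm location it appears as
 * /sys/kernel/mm/swap/vma_ra_enabled.  Reading it prints "true" or
 * "false", and writes accept the kstrtobool() spellings, e.g.:
 *
 *      echo 0 > /sys/kernel/mm/swap/vma_ra_enabled   # cluster readahead only
 *      echo 1 > /sys/kernel/mm/swap/vma_ra_enabled   # allow VMA readahead
 */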