// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */
13
14 #include <linux/mm.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/gfp.h>
17 #include <linux/pagemap.h>
18 #include <linux/swap.h>
19 #include <linux/bio.h>
20 #include <linux/swapops.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/psi.h>
24 #include <linux/uio.h>
25 #include <linux/sched/task.h>
26 #include <linux/delayacct.h>
27 #include <linux/zswap.h>
28 #include "swap.h"
29
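/*
 * Completion handling for swap writes.  __end_swap_bio_write() is shared
 * by the synchronous path, which keeps its bio on the stack, and the
 * asynchronous path, which must additionally drop its bio reference.
 */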
static void __end_swap_bio_write(struct bio *bio)
{
        struct folio *folio = bio_first_folio_all(bio);

        if (bio->bi_status) {
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
                 */
                folio_mark_dirty(folio);
                pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
                folio_clear_reclaim(folio);
        }
        folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
        __end_swap_bio_write(bio);
        bio_put(bio);
}

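/*
 * Completion handling for swap reads: on success the folio is marked
 * uptodate; either way it is unlocked so that waiters can proceed.
 */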
static void __end_swap_bio_read(struct bio *bio)
{
        struct folio *folio = bio_first_folio_all(bio);

        if (bio->bi_status) {
                pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
                                     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                                     (unsigned long long)bio->bi_iter.bi_sector);
        } else {
                folio_mark_uptodate(folio);
        }
        folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
        __end_swap_bio_read(bio);
        bio_put(bio);
}

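/*
 * Generic helper for a filesystem's ->swap_activate() method: probe the
 * swapfile with bmap() and register each physically contiguous,
 * PAGE_SIZE-aligned run of blocks as a swap extent.  Returns the number
 * of extents added, or a negative errno (-EINVAL if the file has holes).
 */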
int generic_swapfile_activate(struct swap_info_struct *sis,
                                struct file *swap_file,
                                sector_t *span)
{
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned blocks_per_page;
        unsigned long page_no;
        unsigned blkbits;
        sector_t probe_block;
        sector_t last_block;
        sector_t lowest_block = -1;
        sector_t highest_block = 0;
        int nr_extents = 0;
        int ret;

        blkbits = inode->i_blkbits;
        blocks_per_page = PAGE_SIZE >> blkbits;

        /*
         * Map all the blocks into the extent tree.  This code doesn't try
         * to be very smart.
         */
        probe_block = 0;
        page_no = 0;
        last_block = i_size_read(inode) >> blkbits;
        while ((probe_block + blocks_per_page) <= last_block &&
                        page_no < sis->max) {
                unsigned block_in_page;
                sector_t first_block;

                cond_resched();

                first_block = probe_block;
                ret = bmap(inode, &first_block);
                if (ret || !first_block)
                        goto bad_bmap;

                /*
                 * It must be PAGE_SIZE aligned on-disk
                 */
                if (first_block & (blocks_per_page - 1)) {
                        probe_block++;
                        goto reprobe;
                }

                for (block_in_page = 1; block_in_page < blocks_per_page;
                                        block_in_page++) {
                        sector_t block;

                        block = probe_block + block_in_page;
                        ret = bmap(inode, &block);
                        if (ret || !block)
                                goto bad_bmap;

                        if (block != first_block + block_in_page) {
                                /* Discontiguity */
                                probe_block++;
                                goto reprobe;
                        }
                }

                first_block >>= (PAGE_SHIFT - blkbits);
                if (page_no) {  /* exclude the header page */
                        if (first_block < lowest_block)
                                lowest_block = first_block;
                        if (first_block > highest_block)
                                highest_block = first_block;
                }

                /*
                 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
                 */
                ret = add_swap_extent(sis, page_no, 1, first_block);
                if (ret < 0)
                        goto out;
                nr_extents += ret;
                page_no++;
                probe_block += blocks_per_page;
reprobe:
                continue;
        }
        ret = nr_extents;
        *span = 1 + highest_block - lowest_block;
        if (page_no == 0)
                page_no = 1;    /* force Empty message */
        sis->max = page_no;
        sis->pages = page_no - 1;
        sis->highest_bit = page_no - 1;
out:
        return ret;
bad_bmap:
        pr_err("swapon: swapfile has holes\n");
        ret = -EINVAL;
        goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        int ret;

        if (folio_free_swap(folio)) {
                folio_unlock(folio);
                return 0;
        }
        /*
         * Arch code may have to preserve more data than just the page
         * contents, e.g. memory tags.
         */
        ret = arch_prepare_to_swap(folio);
        if (ret) {
                folio_mark_dirty(folio);
                folio_unlock(folio);
                return ret;
        }
        if (zswap_store(folio)) {
                folio_start_writeback(folio);
                folio_unlock(folio);
                folio_end_writeback(folio);
                return 0;
        }
        if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
                folio_mark_dirty(folio);
                return AOP_WRITEPAGE_ACTIVATE;
        }

        __swap_writepage(folio, wbc);
        return 0;
}

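/*
 * Account one swap-out in the vmstat counters, including the per-order
 * mTHP statistics and the THP_SWPOUT event for PMD-mappable folios.
 */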
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (unlikely(folio_test_pmd_mappable(folio))) {
                count_memcg_folio_events(folio, THP_SWPOUT, 1);
                count_vm_event(THP_SWPOUT);
        }
        count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
        count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
        struct cgroup_subsys_state *css;
        struct mem_cgroup *memcg;

        memcg = folio_memcg(folio);
        if (!memcg)
                return;

        rcu_read_lock();
        css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
        bio_associate_blkg_from_css(bio, css);
        rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)                do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

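/*
 * A swap_iocb batches up to SWAP_CLUSTER_MAX pages of I/O against the
 * same swapfile into a single kiocb.  It is used by the SWP_FS_OPS
 * paths (e.g. swap over NFS): callers pass a plug pointer so that
 * contiguous requests can be merged before submission via ->swap_rw().
 */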
struct swap_iocb {
        struct kiocb            iocb;
        struct bio_vec          bvec[SWAP_CLUSTER_MAX];
        int                     pages;
        int                     len;
};
static mempool_t *sio_pool;

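/*
 * Lazily create the shared sio mempool.  Racing callers may each
 * allocate a pool, but the cmpxchg() lets only one of them install it
 * and the losers destroy theirs, so no locking is required.
 */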
int sio_pool_init(void)
{
        if (!sio_pool) {
                mempool_t *pool = mempool_create_kmalloc_pool(
                        SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
                if (cmpxchg(&sio_pool, NULL, pool))
                        mempool_destroy(pool);
        }
        if (!sio_pool)
                return -ENOMEM;
        return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        struct page *page = sio->bvec[0].bv_page;
        int p;

        if (ret != sio->len) {
                /*
                 * In the case of swap-over-nfs, this can be a temporary
                 * failure if the system has limited memory for allocating
                 * transmit buffers.  Mark the pages dirty and clear
                 * PG_reclaim to avoid folio_rotate_reclaimable(), and
                 * rate-limit the messages.  Unlike the normal
                 * direct-to-bio case, do not flag PageError, as the
                 * failure could be temporary.
                 */
                pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
                                   ret, page_file_offset(page));
                for (p = 0; p < sio->pages; p++) {
                        page = sio->bvec[p].bv_page;
                        set_page_dirty(page);
                        ClearPageReclaim(page);
                }
        }

        for (p = 0; p < sio->pages; p++)
                end_page_writeback(sio->bvec[p].bv_page);

        mempool_free(sio, sio_pool);
}

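/*
 * Write a folio to a SWP_FS_OPS swapfile.  If the caller supplied a
 * plug, the folio is appended to the pending swap_iocb as long as it
 * is contiguous with the previous request; otherwise, or once the bvec
 * array is full, the accumulated I/O is submitted immediately.
 */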
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
        struct swap_iocb *sio = NULL;
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct file *swap_file = sis->swap_file;
        loff_t pos = folio_file_pos(folio);

        count_swpout_vm_event(folio);
        folio_start_writeback(folio);
        folio_unlock(folio);
        if (wbc->swap_plug)
                sio = *wbc->swap_plug;
        if (sio) {
                if (sio->iocb.ki_filp != swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_write_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_NOIO);
                init_sync_kiocb(&sio->iocb, swap_file);
                sio->iocb.ki_complete = sio_write_complete;
                sio->iocb.ki_pos = pos;
                sio->pages = 0;
                sio->len = 0;
        }
        bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
        sio->len += folio_size(folio);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
                swap_write_unplug(sio);
                sio = NULL;
        }
        if (wbc->swap_plug)
                *wbc->swap_plug = sio;
}

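/*
 * Synchronous block-device write: the bio and bio_vec live on the
 * stack, so nothing is allocated, and the call does not return until
 * the I/O has completed.  Used for SWP_SYNCHRONOUS_IO devices.
 */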
static void swap_writepage_bdev_sync(struct folio *folio,
                struct writeback_control *wbc, struct swap_info_struct *sis)
{
        struct bio_vec bv;
        struct bio bio;

        bio_init(&bio, sis->bdev, &bv, 1,
                 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
        bio.bi_iter.bi_sector = swap_folio_sector(folio);
        bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

        bio_associate_blkg_from_page(&bio, folio);
        count_swpout_vm_event(folio);

        folio_start_writeback(folio);
        folio_unlock(folio);

        submit_bio_wait(&bio);
        __end_swap_bio_write(&bio);
}

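/*
 * Asynchronous block-device write: allocate a bio and leave it to
 * end_swap_bio_write() to finish the writeback from the completion path.
 */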
static void swap_writepage_bdev_async(struct folio *folio,
                struct writeback_control *wbc, struct swap_info_struct *sis)
{
        struct bio *bio;

        bio = bio_alloc(sis->bdev, 1,
                        REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
                        GFP_NOIO);
        bio->bi_iter.bi_sector = swap_folio_sector(folio);
        bio->bi_end_io = end_swap_bio_write;
        bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

        bio_associate_blkg_from_page(bio, folio);
        count_swpout_vm_event(folio);
        folio_start_writeback(folio);
        folio_unlock(folio);
        submit_bio(bio);
}

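/*
 * Submit the actual swap-out I/O for a folio that swap_writepage()
 * decided really must be written: through the filesystem for
 * SWP_FS_OPS, otherwise straight to the block device.
 */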
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);

        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        /*
         * ->flags can be updated non-atomically (scan_swap_map_slots),
         * but that will never affect SWP_FS_OPS, so the data_race
         * is safe.
         */
        if (data_race(sis->flags & SWP_FS_OPS))
                swap_writepage_fs(folio, wbc);
        else if (sis->flags & SWP_SYNCHRONOUS_IO)
                swap_writepage_bdev_sync(folio, wbc, sis);
        else
                swap_writepage_bdev_async(folio, wbc, sis);
}

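/*
 * Submit a plugged swap_iocb via ->swap_rw().  If the filesystem
 * completes the request synchronously instead of returning
 * -EIOCBQUEUED, run the completion handler directly.
 */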
void swap_write_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
        struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
        int p;

        if (ret == sio->len) {
                for (p = 0; p < sio->pages; p++) {
                        struct folio *folio = page_folio(sio->bvec[p].bv_page);

                        folio_mark_uptodate(folio);
                        folio_unlock(folio);
                }
                count_vm_events(PSWPIN, sio->pages);
        } else {
                for (p = 0; p < sio->pages; p++) {
                        struct folio *folio = page_folio(sio->bvec[p].bv_page);

                        folio_unlock(folio);
                }
                pr_alert_ratelimited("Read-error on swap-device\n");
        }
        mempool_free(sio, sio_pool);
}

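/*
 * Read a folio from a SWP_FS_OPS swapfile, mirroring the plugging
 * logic of swap_writepage_fs(): contiguous reads against the same
 * file are merged into one swap_iocb before submission.
 */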
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct swap_iocb *sio = NULL;
        loff_t pos = folio_file_pos(folio);

        if (plug)
                sio = *plug;
        if (sio) {
                if (sio->iocb.ki_filp != sis->swap_file ||
                    sio->iocb.ki_pos + sio->len != pos) {
                        swap_read_unplug(sio);
                        sio = NULL;
                }
        }
        if (!sio) {
                sio = mempool_alloc(sio_pool, GFP_KERNEL);
                init_sync_kiocb(&sio->iocb, sis->swap_file);
                sio->iocb.ki_pos = pos;
                sio->iocb.ki_complete = sio_read_complete;
                sio->pages = 0;
                sio->len = 0;
        }
        bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
        sio->len += folio_size(folio);
        sio->pages += 1;
        if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
                swap_read_unplug(sio);
                sio = NULL;
        }
        if (plug)
                *plug = sio;
}

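/*
 * Synchronous block-device read with an on-stack bio, so no allocation
 * is needed on this (typically memory-pressured) path.
 */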
static void swap_read_folio_bdev_sync(struct folio *folio,
                struct swap_info_struct *sis)
{
        struct bio_vec bv;
        struct bio bio;

        bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
        bio.bi_iter.bi_sector = swap_folio_sector(folio);
        bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
        /*
         * Keep this task valid during swap readpage because the oom killer may
         * attempt to access it in the page fault retry time check.
         */
        get_task_struct(current);
        count_vm_event(PSWPIN);
        submit_bio_wait(&bio);
        __end_swap_bio_read(&bio);
        put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
                struct swap_info_struct *sis)
{
        struct bio *bio;

        bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = swap_folio_sector(folio);
        bio->bi_end_io = end_swap_bio_read;
        bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
        count_vm_event(PSWPIN);
        submit_bio(bio);
}

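/*
 * Read a folio of swapped-out data back in.  The folio must be locked
 * and not uptodate, and must be in the swap cache unless @synchronous.
 * zswap is tried first; otherwise the read is dispatched to the
 * filesystem or the block device, with @plug allowing SWP_FS_OPS reads
 * of adjacent folios to be batched.
 */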
void swap_read_folio(struct folio *folio, bool synchronous,
                struct swap_iocb **plug)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        bool workingset = folio_test_workingset(folio);
        unsigned long pflags;
        bool in_thrashing;

        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

        /*
         * Count submission time as memory stall and delay. When the device
         * is congested, or the submitting cgroup IO-throttled, submission
         * can be a significant part of overall IO time.
         */
        if (workingset) {
                delayacct_thrashing_start(&in_thrashing);
                psi_memstall_enter(&pflags);
        }
        delayacct_swapin_start();

        if (zswap_load(folio)) {
                folio_mark_uptodate(folio);
                folio_unlock(folio);
        } else if (data_race(sis->flags & SWP_FS_OPS)) {
                swap_read_folio_fs(folio, plug);
        } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
                swap_read_folio_bdev_sync(folio, sis);
        } else {
                swap_read_folio_bdev_async(folio, sis);
        }

        if (workingset) {
                delayacct_thrashing_end(&in_thrashing);
                psi_memstall_leave(&pflags);
        }
        delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
        struct iov_iter from;
        struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
        int ret;

        iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
        ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
        if (ret != -EIOCBQUEUED)
                sio_read_complete(&sio->iocb, ret);
}