// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>

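/*
 * Allocate and set up a bio for swap I/O on @page: look up the on-disk
 * swap slot via map_swap_page(), point the bio at that sector of the
 * swap block device, and attach @end_io as the completion handler.
 * Returns NULL if the bio allocation fails.
 */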
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		struct block_device *bdev;

		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, thp_size(page), 0);
	}
	return bio;
}

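/*
 * Completion handler for swap-out bios.  On error, redirty the page so
 * its contents are not lost to reclaim; either way, end writeback and
 * drop the bio reference.
 */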
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

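/*
 * Give the backing device a chance to free the swap slot that @page was
 * just read from; see the comment below for why this only makes sense
 * for in-memory swap devices such as zram.
 */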
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;
	swp_entry_t entry;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (data_race(!(sis->flags & SWP_BLKDEV)))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	entry.val = page_private(page);
	if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
		unsigned long offset;

		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev,
						  offset);
	}
}

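/*
 * Completion handler for swap-in bios.  On success, mark the page
 * uptodate and let the backing device free the swap slot.  If a task is
 * waiting synchronously in swap_readpage(), clear bi_private and wake
 * it.
 */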
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

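/*
 * Walk the swap file with bmap() and record every PAGE_SIZE-aligned,
 * physically contiguous run of blocks as a swap extent, so that swap
 * I/O can later be submitted directly to the underlying block device.
 * Returns the number of extents added, or -EINVAL if the file has
 * holes.
 */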
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(page);
	if (ret) {
		set_page_dirty(page);
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

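/*
 * Convert a swap cache page's offset within the swap area into a
 * 512-byte sector number on the swap device.
 */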
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}

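/*
 * Account a swap-out: bump PSWPOUT once per base page, plus THP_SWPOUT
 * when an entire transparent huge page is written out in one go.
 */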
static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

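/*
 * Attribute a swap-out bio to the block cgroup matching the memcg that
 * owns @page, so the I/O is throttled and accounted against the right
 * cgroup rather than against the submitting task's.
 */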
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;

	if (!page->mem_cgroup)
		return;

	rcu_read_lock();
	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

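/*
 * Write one swap cache page to the swap area.  A swap file served by a
 * filesystem (SWP_FS_OPS) goes through ->direct_IO; otherwise try the
 * synchronous bdev_write_page() path, and fall back to submitting a
 * regular bio with @end_write_func as the completion handler.
 */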
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure could
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}

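/*
 * Read one page from the swap area into the locked swap cache page.
 * frontswap and filesystem-backed swap (SWP_FS_OPS) are tried first;
 * otherwise use the synchronous bdev_read_page() path when available,
 * and finally fall back to a bio.  With @synchronous, poll or sleep
 * until end_swap_bio_read() signals completion.
 */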
int swap_readpage(struct page *page, bool synchronous)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	blk_qc_t qc;
	struct gendisk *disk;
	unsigned long pflags;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall. When the device is congested,
	 * or the submitting cgroup IO-throttled, submission can be a
	 * significant part of overall IO time.
	 */
	psi_memstall_enter(&pflags);

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			if (trylock_page(page)) {
				swap_slot_free_notify(page);
				unlock_page(page);
			}

			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	disk = bio->bi_disk;
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	if (synchronous) {
		bio->bi_opf |= REQ_HIPRI;
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		if (!blk_poll(disk->queue, qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	psi_memstall_leave(&pflags);
	return ret;
}

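/*
 * Dirty a swap cache page.  Filesystem-backed swap (SWP_FS_OPS) must go
 * through the filesystem's own set_page_dirty; block-device swap only
 * needs the dirty flag set.
 */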
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		VM_BUG_ON_PAGE(!PageSwapCache(page), page);
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}