/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <asm/pgtable.h>

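/*
 * Editor's summary of the helper below: allocate a bio describing a single
 * page of swap I/O.  map_swap_page() looks up the on-disk location of the
 * page's swap slot (in PAGE_SIZE units) and fills in the backing block
 * device; shifting by (PAGE_SHIFT - 9) converts that value into 512-byte
 * sectors.  Returns NULL if bio_alloc() fails.
 */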
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
	}
	return bio;
}

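/*
 * Completion handler for swap write bios.  On error the page is re-dirtied
 * so reclaim will try to write it again later and PG_reclaim is cleared;
 * in all cases writeback is ended and the bio reference is dropped.
 */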
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

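/*
 * Completion handler for swap read bios.  On success the page is marked
 * up to date; for block-device swap, a driver that provides
 * swap_slot_free_notify() (zram, for example) is told the slot can be
 * freed and the page is re-dirtied so it gets written out again if it is
 * reclaimed.  The page is unlocked in every case.
 */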
static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		struct swap_info_struct *sis;

		sis = page_swap_info(page);
		if (sis->flags & SWP_BLKDEV) {
			/*
			 * The swap subsystem performs lazy swap slot freeing,
			 * expecting that the page will be swapped out again.
			 * So we can avoid an unnecessary write if the page
			 * isn't redirtied.
			 * This is good for real swap storage because we can
			 * reduce unnecessary I/O and enhance wear-leveling
			 * if an SSD is used as the swap device.
			 * But if an in-memory swap device (e.g. zram) is used,
			 * this causes a duplicated copy between uncompressed
			 * data in VM-owned memory and compressed data in
			 * zram-owned memory.  So let's free zram-owned memory
			 * and make the VM-owned decompressed page *dirty*,
			 * so the page should be swapped out somewhere again if
			 * we again wish to reclaim it.
			 */
			struct gendisk *disk = sis->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify) {
				swp_entry_t entry;
				unsigned long offset;

				entry.val = page_private(page);
				offset = swp_offset(entry);

				SetPageDirty(page);
				disk->fops->swap_slot_free_notify(sis->bdev,
						offset);
			}
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}

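/*
 * Build the swap extent list for a swap file by probing it with bmap(),
 * one PAGE_SIZE-aligned run of blocks at a time.  Returns the number of
 * extents added or a negative errno; a swap file with holes is rejected.
 * *span is set to the range of device blocks covered by the file.
 */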
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	printk(KERN_ERR "swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		/*
		 * frontswap has taken the page, so there is no I/O to
		 * issue; go through set_page_writeback()/
		 * end_page_writeback() so the page still sees a normal
		 * writeback cycle.
		 */
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}

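/*
 * Sector, in 512-byte units, that backs this page on the swap block
 * device: the page's swap index scaled from PAGE_SIZE units to sectors
 * (PAGE_CACHE_SHIFT is equal to PAGE_SHIFT).
 */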
static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9);
}

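/*
 * Low-level swap write.  SWP_FILE swap (swap over NFS, for example) goes
 * through the backing filesystem's direct_IO method; block-device swap
 * first tries bdev_write_page(), which only succeeds for drivers that
 * provide a rw_page method, and otherwise falls back to submitting a bio
 * completed by end_write_func.
 */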
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret, rw = WRITE;
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page(), and rate-limit the
			 * messages, but do not flag PageError like the
			 * normal direct-to-bio case, as the failure may
			 * be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%Lu)\n",
				page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_vm_event(PSWPOUT);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}

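/*
 * Read a page back in from swap.  A successful frontswap_load() satisfies
 * the read immediately; otherwise SWP_FILE swap uses the backing
 * filesystem's readpage method, and block-device swap tries
 * bdev_read_page() before falling back to a bio completed by
 * end_swap_bio_read().
 */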
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		return ret;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		count_vm_event(PSWPIN);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}

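/*
 * Dirty a swap-cache page.  For SWP_FILE swap the dirtying is routed
 * through the backing filesystem's set_page_dirty so it can do its own
 * bookkeeping; plain block-device swap just sets the dirty flag without
 * any writeback accounting.
 */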
int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}