/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
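		/*
		 * page_endio() finishes the page: for reads it marks the
		 * page uptodate (or with an error) and unlocks it; for
		 * writes it ends PageWriteback, honouring bio->bi_error.
		 */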
		page_endio(page, bio_data_dir(bio), bio->bi_error);
	}

	bio_put(bio);
}
57 | ||
ced117c7 | 58 | static struct bio *mpage_bio_submit(int rw, struct bio *bio) |
1da177e4 | 59 | { |
c32b0d4b | 60 | bio->bi_end_io = mpage_end_io; |
4db96b71 | 61 | guard_bio_eod(rw, bio); |
1da177e4 LT |
62 | submit_bio(rw, bio); |
63 | return NULL; | |
64 | } | |
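
/*
 * mpage_bio_submit() always returns NULL so that callers can write
 * "bio = mpage_bio_submit(rw, bio)" and drop their pointer to the bio they
 * no longer own.  guard_bio_eod() trims the final bvec so the request never
 * extends past the end of the underlying device.
 */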
65 | ||
66 | static struct bio * | |
67 | mpage_alloc(struct block_device *bdev, | |
68 | sector_t first_sector, int nr_vecs, | |
dd0fc66f | 69 | gfp_t gfp_flags) |
1da177e4 LT |
70 | { |
71 | struct bio *bio; | |
72 | ||
73 | bio = bio_alloc(gfp_flags, nr_vecs); | |
74 | ||
75 | if (bio == NULL && (current->flags & PF_MEMALLOC)) { | |
76 | while (!bio && (nr_vecs /= 2)) | |
77 | bio = bio_alloc(gfp_flags, nr_vecs); | |
78 | } | |
79 | ||
80 | if (bio) { | |
81 | bio->bi_bdev = bdev; | |
4f024f37 | 82 | bio->bi_iter.bi_sector = first_sector; |
1da177e4 LT |
83 | } |
84 | return bio; | |
85 | } | |
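
/*
 * Note the fallback above: in memory-reclaim context (PF_MEMALLOC),
 * bio_alloc() may fail, so the vector count is halved repeatedly until an
 * allocation succeeds or nr_vecs reaches zero; a smaller bio still makes
 * forward progress.
 */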
86 | ||
87 | /* | |
88 | * support function for mpage_readpages. The fs supplied get_block might | |
89 | * return an up to date buffer. This is used to map that buffer into | |
90 | * the page, which allows readpage to avoid triggering a duplicate call | |
91 | * to get_block. | |
92 | * | |
93 | * The idea is to avoid adding buffers to pages that don't already have | |
94 | * them. So when the buffer is up to date and the page size == block size, | |
95 | * this marks the page up to date instead of adding new buffers. | |
96 | */ | |
97 | static void | |
98 | map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) | |
99 | { | |
100 | struct inode *inode = page->mapping->host; | |
101 | struct buffer_head *page_bh, *head; | |
102 | int block = 0; | |
103 | ||
104 | if (!page_has_buffers(page)) { | |
105 | /* | |
106 | * don't make any buffers if there is only one buffer on | |
107 | * the page and the page just needs to be set up to date | |
108 | */ | |
109 | if (inode->i_blkbits == PAGE_CACHE_SHIFT && | |
110 | buffer_uptodate(bh)) { | |
111 | SetPageUptodate(page); | |
112 | return; | |
113 | } | |
114 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); | |
115 | } | |
116 | head = page_buffers(page); | |
117 | page_bh = head; | |
118 | do { | |
119 | if (block == page_block) { | |
120 | page_bh->b_state = bh->b_state; | |
121 | page_bh->b_bdev = bh->b_bdev; | |
122 | page_bh->b_blocknr = bh->b_blocknr; | |
123 | break; | |
124 | } | |
125 | page_bh = page_bh->b_this_page; | |
126 | block++; | |
127 | } while (page_bh != head); | |
128 | } | |
129 | ||
fa30bd05 BP |
130 | /* |
131 | * This is the worker routine which does all the work of mapping the disk | |
132 | * blocks and constructs largest possible bios, submits them for IO if the | |
133 | * blocks are not contiguous on the disk. | |
134 | * | |
135 | * We pass a buffer_head back and forth and use its buffer_mapped() flag to | |
136 | * represent the validity of its disk mapping and to decide when to do the next | |
137 | * get_block() call. | |
138 | */ | |
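
/*
 * Worked example: with PAGE_CACHE_SIZE == 4096 and 1k filesystem blocks
 * (blkbits == 10), blocks_per_page == 4, so page index 3 covers file
 * blocks 12..15 (block_in_file == 3 << (12 - 10) == 12).
 */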
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
311 | ||
67be2dd1 | 312 | /** |
78a4a50a | 313 | * mpage_readpages - populate an address space with some pages & start reads against them |
67be2dd1 MW |
314 | * @mapping: the address_space |
315 | * @pages: The address of a list_head which contains the target pages. These | |
316 | * pages have their ->index populated and are otherwise uninitialised. | |
67be2dd1 MW |
317 | * The page at @pages->prev has the lowest file offset, and reads should be |
318 | * issued in @pages->prev to @pages->next order. | |
67be2dd1 MW |
319 | * @nr_pages: The number of pages at *@pages |
320 | * @get_block: The filesystem's block mapper function. | |
321 | * | |
322 | * This function walks the pages and the blocks within each page, building and | |
323 | * emitting large BIOs. | |
324 | * | |
325 | * If anything unusual happens, such as: | |
326 | * | |
327 | * - encountering a page which has buffers | |
328 | * - encountering a page which has a non-hole after a hole | |
329 | * - encountering a page with non-contiguous blocks | |
330 | * | |
331 | * then this code just gives up and calls the buffer_head-based read function. | |
332 | * It does handle a page which has holes at the end - that is a common case: | |
333 | * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. | |
334 | * | |
335 | * BH_Boundary explanation: | |
336 | * | |
337 | * There is a problem. The mpage read code assembles several pages, gets all | |
338 | * their disk mappings, and then submits them all. That's fine, but obtaining | |
339 | * the disk mappings may require I/O. Reads of indirect blocks, for example. | |
340 | * | |
341 | * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be | |
342 | * submitted in the following order: | |
343 | * 12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16 | |
78a4a50a | 344 | * |
67be2dd1 MW |
345 | * because the indirect block has to be read to get the mappings of blocks |
346 | * 13,14,15,16. Obviously, this impacts performance. | |
347 | * | |
348 | * So what we do it to allow the filesystem's get_block() function to set | |
349 | * BH_Boundary when it maps block 11. BH_Boundary says: mapping of the block | |
350 | * after this one will require I/O against a block which is probably close to | |
351 | * this one. So you should push what I/O you have currently accumulated. | |
352 | * | |
353 | * This all causes the disk requests to be issued in the correct order. | |
354 | */ | |
1da177e4 LT |
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
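
/*
 * Usage sketch (illustrative, modelled on ext2; the example_* names are
 * hypothetical): a filesystem hooks these helpers into its
 * address_space_operations and supplies its own get_block:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, example_get_block);
 *	}
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.readpages	= example_readpages,
 *	};
 */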
390 | ||
391 | /* | |
392 | * This isn't called much at all | |
393 | */ | |
394 | int mpage_readpage(struct page *page, get_block_t get_block) | |
395 | { | |
396 | struct bio *bio = NULL; | |
397 | sector_t last_block_in_bio = 0; | |
fa30bd05 BP |
398 | struct buffer_head map_bh; |
399 | unsigned long first_logical_block = 0; | |
c62d2555 | 400 | gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); |
1da177e4 | 401 | |
79ffab34 AK |
402 | map_bh.b_state = 0; |
403 | map_bh.b_size = 0; | |
fa30bd05 | 404 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, |
063d99b4 | 405 | &map_bh, &first_logical_block, get_block, gfp); |
1da177e4 LT |
406 | if (bio) |
407 | mpage_bio_submit(READ, bio); | |
408 | return 0; | |
409 | } | |
410 | EXPORT_SYMBOL(mpage_readpage); | |
411 | ||
412 | /* | |
413 | * Writing is not so simple. | |
414 | * | |
415 | * If the page has buffers then they will be used for obtaining the disk | |
416 | * mapping. We only support pages which are fully mapped-and-dirty, with a | |
417 | * special case for pages which are unmapped at the end: end-of-file. | |
418 | * | |
419 | * If the page has no buffers (preferred) then the page is mapped here. | |
420 | * | |
421 | * If all blocks are found to be contiguous then the page can go into the | |
422 | * BIO. Otherwise fall back to the mapping's writepage(). | |
423 | * | |
424 | * FIXME: This code wants an estimate of how many pages are still to be | |
425 | * written, so it can intelligently allocate a suitably-sized BIO. For now, | |
426 | * just allocate full-size (16-page) BIOs. | |
427 | */ | |
0ea97180 | 428 | |
ced117c7 DV |
429 | struct mpage_data { |
430 | struct bio *bio; | |
431 | sector_t last_block_in_bio; | |
432 | get_block_t *get_block; | |
433 | unsigned use_writepage; | |
434 | }; | |
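
/*
 * A single mpage_data instance is threaded through write_cache_pages(),
 * carrying the bio under construction (and its last block) from one
 * __mpage_writepage() call to the next so adjacent pages can share a bio.
 */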
435 | ||
90768eee MW |
436 | /* |
437 | * We have our BIO, so we can now mark the buffers clean. Make | |
438 | * sure to only clean buffers which we know we'll be writing. | |
439 | */ | |
440 | static void clean_buffers(struct page *page, unsigned first_unmapped) | |
441 | { | |
442 | unsigned buffer_counter = 0; | |
443 | struct buffer_head *bh, *head; | |
444 | if (!page_has_buffers(page)) | |
445 | return; | |
446 | head = page_buffers(page); | |
447 | bh = head; | |
448 | ||
449 | do { | |
450 | if (buffer_counter++ == first_unmapped) | |
451 | break; | |
452 | clear_buffer_dirty(bh); | |
453 | bh = bh->b_this_page; | |
454 | } while (bh != head); | |
455 | ||
456 | /* | |
457 | * we cannot drop the bh if the page is not uptodate or a concurrent | |
458 | * readpage would fail to serialize with the bh and it would read from | |
459 | * disk before we reach the platter. | |
460 | */ | |
461 | if (buffer_heads_over_limit && PageUptodate(page)) | |
462 | try_to_free_buffers(page); | |
463 | } | |
464 | ||
ced117c7 | 465 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, |
29a814d2 | 466 | void *data) |
1da177e4 | 467 | { |
0ea97180 MS |
468 | struct mpage_data *mpd = data; |
469 | struct bio *bio = mpd->bio; | |
1da177e4 LT |
470 | struct address_space *mapping = page->mapping; |
471 | struct inode *inode = page->mapping->host; | |
472 | const unsigned blkbits = inode->i_blkbits; | |
473 | unsigned long end_index; | |
474 | const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; | |
475 | sector_t last_block; | |
476 | sector_t block_in_file; | |
477 | sector_t blocks[MAX_BUF_PER_PAGE]; | |
478 | unsigned page_block; | |
479 | unsigned first_unmapped = blocks_per_page; | |
480 | struct block_device *bdev = NULL; | |
481 | int boundary = 0; | |
482 | sector_t boundary_block = 0; | |
483 | struct block_device *boundary_bdev = NULL; | |
484 | int length; | |
485 | struct buffer_head map_bh; | |
486 | loff_t i_size = i_size_read(inode); | |
0ea97180 | 487 | int ret = 0; |
5948edbc | 488 | int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); |
1da177e4 LT |
489 | |
490 | if (page_has_buffers(page)) { | |
491 | struct buffer_head *head = page_buffers(page); | |
492 | struct buffer_head *bh = head; | |
493 | ||
494 | /* If they're all mapped and dirty, do it */ | |
495 | page_block = 0; | |
496 | do { | |
497 | BUG_ON(buffer_locked(bh)); | |
498 | if (!buffer_mapped(bh)) { | |
499 | /* | |
500 | * unmapped dirty buffers are created by | |
501 | * __set_page_dirty_buffers -> mmapped data | |
502 | */ | |
503 | if (buffer_dirty(bh)) | |
504 | goto confused; | |
505 | if (first_unmapped == blocks_per_page) | |
506 | first_unmapped = page_block; | |
507 | continue; | |
508 | } | |
509 | ||
510 | if (first_unmapped != blocks_per_page) | |
511 | goto confused; /* hole -> non-hole */ | |
512 | ||
513 | if (!buffer_dirty(bh) || !buffer_uptodate(bh)) | |
514 | goto confused; | |
515 | if (page_block) { | |
516 | if (bh->b_blocknr != blocks[page_block-1] + 1) | |
517 | goto confused; | |
518 | } | |
519 | blocks[page_block++] = bh->b_blocknr; | |
520 | boundary = buffer_boundary(bh); | |
521 | if (boundary) { | |
522 | boundary_block = bh->b_blocknr; | |
523 | boundary_bdev = bh->b_bdev; | |
524 | } | |
525 | bdev = bh->b_bdev; | |
526 | } while ((bh = bh->b_this_page) != head); | |
527 | ||
528 | if (first_unmapped) | |
529 | goto page_is_mapped; | |
530 | ||
531 | /* | |
532 | * Page has buffers, but they are all unmapped. The page was | |
533 | * created by pagein or read over a hole which was handled by | |
534 | * block_read_full_page(). If this address_space is also | |
535 | * using mpage_readpages then this can rarely happen. | |
536 | */ | |
537 | goto confused; | |
538 | } | |
539 | ||
540 | /* | |
541 | * The page has no buffers: map it to disk | |
542 | */ | |
543 | BUG_ON(!PageUptodate(page)); | |
54b21a79 | 544 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); |
1da177e4 LT |
545 | last_block = (i_size - 1) >> blkbits; |
546 | map_bh.b_page = page; | |
547 | for (page_block = 0; page_block < blocks_per_page; ) { | |
548 | ||
549 | map_bh.b_state = 0; | |
b0cf2321 | 550 | map_bh.b_size = 1 << blkbits; |
0ea97180 | 551 | if (mpd->get_block(inode, block_in_file, &map_bh, 1)) |
1da177e4 LT |
552 | goto confused; |
553 | if (buffer_new(&map_bh)) | |
554 | unmap_underlying_metadata(map_bh.b_bdev, | |
555 | map_bh.b_blocknr); | |
556 | if (buffer_boundary(&map_bh)) { | |
557 | boundary_block = map_bh.b_blocknr; | |
558 | boundary_bdev = map_bh.b_bdev; | |
559 | } | |
560 | if (page_block) { | |
561 | if (map_bh.b_blocknr != blocks[page_block-1] + 1) | |
562 | goto confused; | |
563 | } | |
564 | blocks[page_block++] = map_bh.b_blocknr; | |
565 | boundary = buffer_boundary(&map_bh); | |
566 | bdev = map_bh.b_bdev; | |
567 | if (block_in_file == last_block) | |
568 | break; | |
569 | block_in_file++; | |
570 | } | |
571 | BUG_ON(page_block == 0); | |
572 | ||
573 | first_unmapped = page_block; | |
574 | ||
575 | page_is_mapped: | |
576 | end_index = i_size >> PAGE_CACHE_SHIFT; | |
577 | if (page->index >= end_index) { | |
578 | /* | |
579 | * The page straddles i_size. It must be zeroed out on each | |
2a61aa40 | 580 | * and every writepage invocation because it may be mmapped. |
1da177e4 LT |
581 | * "A file is mapped in multiples of the page size. For a file |
582 | * that is not a multiple of the page size, the remaining memory | |
583 | * is zeroed when mapped, and writes to that region are not | |
584 | * written out to the file." | |
585 | */ | |
586 | unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); | |
1da177e4 LT |
587 | |
588 | if (page->index > end_index || !offset) | |
589 | goto confused; | |
eebd2aa3 | 590 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); |
1da177e4 LT |
591 | } |
592 | ||
593 | /* | |
594 | * This page will go to BIO. Do we need to send this BIO off first? | |
595 | */ | |
0ea97180 | 596 | if (bio && mpd->last_block_in_bio != blocks[0] - 1) |
5948edbc | 597 | bio = mpage_bio_submit(wr, bio); |
1da177e4 LT |
598 | |
599 | alloc_new: | |
600 | if (bio == NULL) { | |
47a191fd MW |
601 | if (first_unmapped == blocks_per_page) { |
602 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), | |
603 | page, wbc)) { | |
604 | clean_buffers(page, first_unmapped); | |
605 | goto out; | |
606 | } | |
607 | } | |
1da177e4 | 608 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), |
b54ffb73 | 609 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); |
1da177e4 LT |
610 | if (bio == NULL) |
611 | goto confused; | |
429b3fb0 | 612 | |
b16b1deb | 613 | wbc_init_bio(wbc, bio); |
1da177e4 LT |
614 | } |
615 | ||
616 | /* | |
617 | * Must try to add the page before marking the buffer clean or | |
618 | * the confused fail path above (OOM) will be very confused when | |
619 | * it finds all bh marked clean (i.e. it will not write anything) | |
620 | */ | |
2a814908 | 621 | wbc_account_io(wbc, page, PAGE_SIZE); |
1da177e4 LT |
622 | length = first_unmapped << blkbits; |
623 | if (bio_add_page(bio, page, length, 0) < length) { | |
5948edbc | 624 | bio = mpage_bio_submit(wr, bio); |
1da177e4 LT |
625 | goto alloc_new; |
626 | } | |
627 | ||
90768eee | 628 | clean_buffers(page, first_unmapped); |
1da177e4 LT |
629 | |
630 | BUG_ON(PageWriteback(page)); | |
631 | set_page_writeback(page); | |
632 | unlock_page(page); | |
633 | if (boundary || (first_unmapped != blocks_per_page)) { | |
5948edbc | 634 | bio = mpage_bio_submit(wr, bio); |
1da177e4 LT |
635 | if (boundary_block) { |
636 | write_boundary_block(boundary_bdev, | |
637 | boundary_block, 1 << blkbits); | |
638 | } | |
639 | } else { | |
0ea97180 | 640 | mpd->last_block_in_bio = blocks[blocks_per_page - 1]; |
1da177e4 LT |
641 | } |
642 | goto out; | |
643 | ||
644 | confused: | |
645 | if (bio) | |
5948edbc | 646 | bio = mpage_bio_submit(wr, bio); |
1da177e4 | 647 | |
0ea97180 MS |
648 | if (mpd->use_writepage) { |
649 | ret = mapping->a_ops->writepage(page, wbc); | |
1da177e4 | 650 | } else { |
0ea97180 | 651 | ret = -EAGAIN; |
1da177e4 LT |
652 | goto out; |
653 | } | |
654 | /* | |
655 | * The caller has a ref on the inode, so *mapping is stable | |
656 | */ | |
0ea97180 | 657 | mapping_set_error(mapping, ret); |
1da177e4 | 658 | out: |
0ea97180 MS |
659 | mpd->bio = bio; |
660 | return ret; | |
1da177e4 LT |
661 | } |
662 | ||
663 | /** | |
78a4a50a | 664 | * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them |
1da177e4 LT |
665 | * @mapping: address space structure to write |
666 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | |
667 | * @get_block: the filesystem's block mapper function. | |
668 | * If this is NULL then use a_ops->writepage. Otherwise, go | |
669 | * direct-to-BIO. | |
670 | * | |
671 | * This is a library function, which implements the writepages() | |
672 | * address_space_operation. | |
673 | * | |
674 | * If a page is already under I/O, generic_writepages() skips it, even | |
675 | * if it's dirty. This is desirable behaviour for memory-cleaning writeback, | |
676 | * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() | |
677 | * and msync() need to guarantee that all the data which was dirty at the time | |
678 | * the call was made get new I/O started against them. If wbc->sync_mode is | |
679 | * WB_SYNC_ALL then we were called for data integrity and we must wait for | |
680 | * existing IO to complete. | |
681 | */ | |
682 | int | |
683 | mpage_writepages(struct address_space *mapping, | |
684 | struct writeback_control *wbc, get_block_t get_block) | |
1da177e4 | 685 | { |
2ed1a6bc | 686 | struct blk_plug plug; |
0ea97180 MS |
687 | int ret; |
688 | ||
2ed1a6bc JA |
689 | blk_start_plug(&plug); |
690 | ||
0ea97180 MS |
691 | if (!get_block) |
692 | ret = generic_writepages(mapping, wbc); | |
693 | else { | |
694 | struct mpage_data mpd = { | |
695 | .bio = NULL, | |
696 | .last_block_in_bio = 0, | |
697 | .get_block = get_block, | |
698 | .use_writepage = 1, | |
699 | }; | |
700 | ||
701 | ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); | |
5948edbc RP |
702 | if (mpd.bio) { |
703 | int wr = (wbc->sync_mode == WB_SYNC_ALL ? | |
704 | WRITE_SYNC : WRITE); | |
705 | mpage_bio_submit(wr, mpd.bio); | |
706 | } | |
1da177e4 | 707 | } |
2ed1a6bc | 708 | blk_finish_plug(&plug); |
1da177e4 LT |
709 | return ret; |
710 | } | |
711 | EXPORT_SYMBOL(mpage_writepages); | |
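
/*
 * Usage sketch (illustrative, modelled on ext2; the example_* names are
 * hypothetical): the write side is wired up the same way as the read side:
 *
 *	static int example_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, example_get_block, wbc);
 *	}
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */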

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int wr = (wbc->sync_mode == WB_SYNC_ALL ?
			  WRITE_SYNC : WRITE);
		mpage_bio_submit(wr, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);