Commit | Line | Data |
---|---|---|
ae259a9c CH |
1 | /* |
2 | * Copyright (C) 2010 Red Hat, Inc. | |
72b4daa2 | 3 | * Copyright (c) 2016-2018 Christoph Hellwig. |
ae259a9c CH |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | #include <linux/module.h> | |
15 | #include <linux/compiler.h> | |
16 | #include <linux/fs.h> | |
17 | #include <linux/iomap.h> | |
18 | #include <linux/uaccess.h> | |
19 | #include <linux/gfp.h> | |
9dc55f13 | 20 | #include <linux/migrate.h> |
ae259a9c | 21 | #include <linux/mm.h> |
72b4daa2 | 22 | #include <linux/mm_inline.h> |
ae259a9c CH |
23 | #include <linux/swap.h> |
24 | #include <linux/pagemap.h> | |
8a78cb1f | 25 | #include <linux/pagevec.h> |
ae259a9c CH |
26 | #include <linux/file.h> |
27 | #include <linux/uio.h> | |
28 | #include <linux/backing-dev.h> | |
29 | #include <linux/buffer_head.h> | |
ff6a9292 | 30 | #include <linux/task_io_accounting_ops.h> |
9a286f0e | 31 | #include <linux/dax.h> |
f361bf4a IM |
32 | #include <linux/sched/signal.h> |
33 | ||
ae259a9c CH |
34 | #include "internal.h" |
35 | ||
ae259a9c CH |
36 | /* |
37 | * Execute an iomap write on a segment of the mapping that spans a | |
38 | * contiguous range of pages that have identical block mapping state. | |
39 | * | |
40 | * This avoids the need to map pages individually, do individual allocations | |
41 | * for each page and, most importantly, avoids the need for filesystem-specific | |
42 | * locking per page. Instead, all the operations are amortised over the entire | |
43 | * range of pages. It is assumed that the filesystems will lock whatever | |
44 | * resources they require in the iomap_begin call, and release them in the | |
45 | * iomap_end call. | |
46 | */ | |
befb503c | 47 | loff_t |
ae259a9c | 48 | iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, |
8ff6daa1 | 49 | const struct iomap_ops *ops, void *data, iomap_actor_t actor) |
ae259a9c CH |
50 | { |
51 | struct iomap iomap = { 0 }; | |
52 | loff_t written = 0, ret; | |
53 | ||
54 | /* | |
55 | * Need to map a range from start position for length bytes. This can | |
56 | * span multiple pages - it is only guaranteed to return a range of a | |
57 | * single type of pages (e.g. all into a hole, all mapped or all | |
58 | * unwritten). Failure at this point has nothing to undo. | |
59 | * | |
60 | * If allocation is required for this range, reserve the space now so | |
61 | * that the allocation is guaranteed to succeed later on. Once we copy | |
62 | * the data into the page cache pages, then we cannot fail otherwise we | |
63 | * expose transient stale data. If the reserve fails, we can safely | |
64 | * back out at this point as there is nothing to undo. | |
65 | */ | |
66 | ret = ops->iomap_begin(inode, pos, length, flags, &iomap); | |
67 | if (ret) | |
68 | return ret; | |
69 | if (WARN_ON(iomap.offset > pos)) | |
70 | return -EIO; | |
0c6dda7a DW |
71 | if (WARN_ON(iomap.length == 0)) |
72 | return -EIO; | |
ae259a9c CH |
73 | |
74 | /* | |
75 | * Cut down the length to the one actually provided by the filesystem, | |
76 | * as it might not be able to give us the whole size that we requested. | |
77 | */ | |
78 | if (iomap.offset + iomap.length < pos + length) | |
79 | length = iomap.offset + iomap.length - pos; | |
80 | ||
81 | /* | |
82 | * Now that we have guaranteed that the space allocation will succeed, | |
83 | * we can do the copy-in page by page without having to worry about | |
84 | * failures exposing transient data. | |
85 | */ | |
86 | written = actor(inode, pos, length, data, &iomap); | |
87 | ||
88 | /* | |
89 | * Now the data has been copied, commit the range we've copied. This | |
90 | * should not fail unless the filesystem has had a fatal error. | |
91 | */ | |
f20ac7ab CH |
92 | if (ops->iomap_end) { |
93 | ret = ops->iomap_end(inode, pos, length, | |
94 | written > 0 ? written : 0, | |
95 | flags, &iomap); | |
96 | } | |
ae259a9c CH |
97 | |
98 | return written ? written : ret; | |
99 | } | |
100 | ||
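/*
 * Illustrative sketch (not part of the original file): how a caller might
 * pair iomap_apply() with a trivial actor.  The actor only ever sees a
 * range covered by a single extent type and returns how many bytes it
 * consumed; iomap_apply() wraps it in ->iomap_begin/->iomap_end.  The
 * names example_count_actor, example_mapped_bytes and example_iomap_ops
 * are hypothetical and exist only for illustration.
 */
static loff_t
example_count_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	loff_t *mapped = data;

	if (iomap->type == IOMAP_MAPPED)
		*mapped += length;
	return length;		/* consume the whole extent */
}

static loff_t
example_mapped_bytes(struct inode *inode, loff_t pos, loff_t length)
{
	loff_t mapped = 0, ret;

	while (length > 0) {
		ret = iomap_apply(inode, pos, length, IOMAP_REPORT,
				&example_iomap_ops, &mapped,
				example_count_actor);
		if (ret <= 0)
			break;
		pos += ret;
		length -= ret;
	}
	return mapped;
}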
57fc505d CH |
101 | static sector_t |
102 | iomap_sector(struct iomap *iomap, loff_t pos) | |
103 | { | |
104 | return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; | |
105 | } | |
106 | ||
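/*
 * Worked example for iomap_sector() (illustration only, not from the
 * original file): with iomap->addr = 8192, iomap->offset = 4096 and
 * pos = 6144, the byte address on disk is 8192 + (6144 - 4096) = 10240,
 * which shifted right by SECTOR_SHIFT (9) yields sector 20 of the
 * backing block device.
 */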
9dc55f13 CH |
107 | static struct iomap_page * |
108 | iomap_page_create(struct inode *inode, struct page *page) | |
109 | { | |
110 | struct iomap_page *iop = to_iomap_page(page); | |
111 | ||
112 | if (iop || i_blocksize(inode) == PAGE_SIZE) | |
113 | return iop; | |
114 | ||
115 | iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); | |
116 | atomic_set(&iop->read_count, 0); | |
117 | atomic_set(&iop->write_count, 0); | |
118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); | |
8e47a457 PJ |
119 | |
120 | /* | |
121 | * migrate_page_move_mapping() assumes that pages with private data have | |
122 | * their count elevated by 1. | |
123 | */ | |
124 | get_page(page); | |
9dc55f13 CH |
125 | set_page_private(page, (unsigned long)iop); |
126 | SetPagePrivate(page); | |
127 | return iop; | |
128 | } | |
129 | ||
130 | static void | |
131 | iomap_page_release(struct page *page) | |
132 | { | |
133 | struct iomap_page *iop = to_iomap_page(page); | |
134 | ||
135 | if (!iop) | |
136 | return; | |
137 | WARN_ON_ONCE(atomic_read(&iop->read_count)); | |
138 | WARN_ON_ONCE(atomic_read(&iop->write_count)); | |
139 | ClearPagePrivate(page); | |
140 | set_page_private(page, 0); | |
8e47a457 | 141 | put_page(page); |
9dc55f13 CH |
142 | kfree(iop); |
143 | } | |
144 | ||
145 | /* | |
146 | * Calculate the range inside the page that we actually need to read. | |
147 | */ | |
148 | static void | |
149 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, | |
150 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) | |
151 | { | |
8c110d43 DC |
152 | loff_t orig_pos = *pos; |
153 | loff_t isize = i_size_read(inode); | |
9dc55f13 CH |
154 | unsigned block_bits = inode->i_blkbits; |
155 | unsigned block_size = (1 << block_bits); | |
10259de1 | 156 | unsigned poff = offset_in_page(*pos); |
9dc55f13 CH |
157 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); |
158 | unsigned first = poff >> block_bits; | |
159 | unsigned last = (poff + plen - 1) >> block_bits; | |
9dc55f13 CH |
160 | |
161 | /* | |
162 | * If the block size is smaller than the page size we need to check the | |
163 | * per-block uptodate status and adjust the offset and length if needed | |
164 | * to avoid reading in already uptodate ranges. | |
165 | */ | |
166 | if (iop) { | |
167 | unsigned int i; | |
168 | ||
169 | /* move forward for each leading block marked uptodate */ | |
170 | for (i = first; i <= last; i++) { | |
171 | if (!test_bit(i, iop->uptodate)) | |
172 | break; | |
173 | *pos += block_size; | |
174 | poff += block_size; | |
175 | plen -= block_size; | |
176 | first++; | |
177 | } | |
178 | ||
179 | /* truncate len if we find any trailing uptodate block(s) */ | |
180 | for ( ; i <= last; i++) { | |
181 | if (test_bit(i, iop->uptodate)) { | |
182 | plen -= (last - i + 1) * block_size; | |
183 | last = i - 1; | |
184 | break; | |
185 | } | |
186 | } | |
187 | } | |
188 | ||
189 | /* | |
190 | * If the extent spans the block that contains the i_size we need to | |
191 | * handle both halves separately so that we properly zero data in the | |
192 | * page cache for blocks that are entirely outside of i_size. | |
193 | */ | |
8c110d43 DC |
194 | if (orig_pos <= isize && orig_pos + length > isize) { |
195 | unsigned end = offset_in_page(isize - 1) >> block_bits; | |
196 | ||
197 | if (first <= end && last > end) | |
198 | plen -= (last - end) * block_size; | |
199 | } | |
9dc55f13 CH |
200 | |
201 | *offp = poff; | |
202 | *lenp = plen; | |
203 | } | |
204 | ||
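/*
 * Worked example for iomap_adjust_read_range() (illustration only,
 * assuming i_size lies beyond this page): on a 4k page with 1k blocks,
 * a request for *pos = 0, length = 4096 where blocks 0 and 3 are already
 * marked uptodate in iop->uptodate comes back as *offp = 1024,
 * *lenp = 2048, i.e. only blocks 1 and 2 need to be read.  The leading
 * uptodate block advances *pos as well, while the trailing one merely
 * shortens the length; the remainder is picked up on the next iteration,
 * as the comment at the end of iomap_readpage_actor() below explains.
 */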
205 | static void | |
206 | iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) | |
207 | { | |
208 | struct iomap_page *iop = to_iomap_page(page); | |
209 | struct inode *inode = page->mapping->host; | |
210 | unsigned first = off >> inode->i_blkbits; | |
211 | unsigned last = (off + len - 1) >> inode->i_blkbits; | |
212 | unsigned int i; | |
213 | bool uptodate = true; | |
214 | ||
215 | if (iop) { | |
216 | for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) { | |
217 | if (i >= first && i <= last) | |
218 | set_bit(i, iop->uptodate); | |
219 | else if (!test_bit(i, iop->uptodate)) | |
220 | uptodate = false; | |
221 | } | |
222 | } | |
223 | ||
224 | if (uptodate && !PageError(page)) | |
225 | SetPageUptodate(page); | |
226 | } | |
227 | ||
228 | static void | |
229 | iomap_read_finish(struct iomap_page *iop, struct page *page) | |
230 | { | |
231 | if (!iop || atomic_dec_and_test(&iop->read_count)) | |
232 | unlock_page(page); | |
233 | } | |
234 | ||
235 | static void | |
236 | iomap_read_page_end_io(struct bio_vec *bvec, int error) | |
237 | { | |
238 | struct page *page = bvec->bv_page; | |
239 | struct iomap_page *iop = to_iomap_page(page); | |
240 | ||
241 | if (unlikely(error)) { | |
242 | ClearPageUptodate(page); | |
243 | SetPageError(page); | |
244 | } else { | |
245 | iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); | |
246 | } | |
247 | ||
248 | iomap_read_finish(iop, page); | |
249 | } | |
250 | ||
19e0c58f AG |
251 | static void |
252 | iomap_read_inline_data(struct inode *inode, struct page *page, | |
253 | struct iomap *iomap) | |
254 | { | |
255 | size_t size = i_size_read(inode); | |
256 | void *addr; | |
257 | ||
258 | if (PageUptodate(page)) | |
259 | return; | |
260 | ||
261 | BUG_ON(page->index); | |
262 | BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
263 | ||
264 | addr = kmap_atomic(page); | |
265 | memcpy(addr, iomap->inline_data, size); | |
266 | memset(addr + size, 0, PAGE_SIZE - size); | |
267 | kunmap_atomic(addr); | |
268 | SetPageUptodate(page); | |
269 | } | |
270 | ||
ae259a9c | 271 | static void |
72b4daa2 CH |
272 | iomap_read_end_io(struct bio *bio) |
273 | { | |
274 | int error = blk_status_to_errno(bio->bi_status); | |
275 | struct bio_vec *bvec; | |
276 | int i; | |
6dc4f100 | 277 | struct bvec_iter_all iter_all; |
72b4daa2 | 278 | |
6dc4f100 | 279 | bio_for_each_segment_all(bvec, bio, i, iter_all) |
9dc55f13 | 280 | iomap_read_page_end_io(bvec, error); |
72b4daa2 CH |
281 | bio_put(bio); |
282 | } | |
283 | ||
284 | struct iomap_readpage_ctx { | |
285 | struct page *cur_page; | |
286 | bool cur_page_in_bio; | |
287 | bool is_readahead; | |
288 | struct bio *bio; | |
289 | struct list_head *pages; | |
290 | }; | |
291 | ||
292 | static loff_t | |
293 | iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
294 | struct iomap *iomap) | |
295 | { | |
296 | struct iomap_readpage_ctx *ctx = data; | |
297 | struct page *page = ctx->cur_page; | |
9dc55f13 | 298 | struct iomap_page *iop = iomap_page_create(inode, page); |
72b4daa2 | 299 | bool is_contig = false; |
9dc55f13 CH |
300 | loff_t orig_pos = pos; |
301 | unsigned poff, plen; | |
72b4daa2 CH |
302 | sector_t sector; |
303 | ||
806a1477 | 304 | if (iomap->type == IOMAP_INLINE) { |
7d5e049e | 305 | WARN_ON_ONCE(pos); |
806a1477 AG |
306 | iomap_read_inline_data(inode, page, iomap); |
307 | return PAGE_SIZE; | |
308 | } | |
309 | ||
9dc55f13 CH |
310 | /* zero post-eof blocks as the page may be mapped */ |
311 | iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); | |
312 | if (plen == 0) | |
313 | goto done; | |
72b4daa2 CH |
314 | |
315 | if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) { | |
316 | zero_user(page, poff, plen); | |
9dc55f13 | 317 | iomap_set_range_uptodate(page, poff, plen); |
72b4daa2 CH |
318 | goto done; |
319 | } | |
320 | ||
321 | ctx->cur_page_in_bio = true; | |
322 | ||
323 | /* | |
324 | * Try to merge into a previous segment if we can. | |
325 | */ | |
326 | sector = iomap_sector(iomap, pos); | |
327 | if (ctx->bio && bio_end_sector(ctx->bio) == sector) { | |
07173c3e | 328 | if (__bio_try_merge_page(ctx->bio, page, plen, poff, true)) |
72b4daa2 CH |
329 | goto done; |
330 | is_contig = true; | |
331 | } | |
332 | ||
9dc55f13 CH |
333 | /* |
334 | * If we start a new segment we need to increase the read count, and we | |
335 | * need to do so before submitting any previous full bio to make sure | |
336 | * that we don't prematurely unlock the page. | |
337 | */ | |
338 | if (iop) | |
339 | atomic_inc(&iop->read_count); | |
340 | ||
72b4daa2 CH |
341 | if (!ctx->bio || !is_contig || bio_full(ctx->bio)) { |
342 | gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); | |
343 | int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
344 | ||
345 | if (ctx->bio) | |
346 | submit_bio(ctx->bio); | |
347 | ||
348 | if (ctx->is_readahead) /* same as readahead_gfp_mask */ | |
349 | gfp |= __GFP_NORETRY | __GFP_NOWARN; | |
350 | ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); | |
351 | ctx->bio->bi_opf = REQ_OP_READ; | |
352 | if (ctx->is_readahead) | |
353 | ctx->bio->bi_opf |= REQ_RAHEAD; | |
354 | ctx->bio->bi_iter.bi_sector = sector; | |
355 | bio_set_dev(ctx->bio, iomap->bdev); | |
356 | ctx->bio->bi_end_io = iomap_read_end_io; | |
357 | } | |
358 | ||
07173c3e | 359 | bio_add_page(ctx->bio, page, plen, poff); |
72b4daa2 | 360 | done: |
9dc55f13 CH |
361 | /* |
362 | * Move the caller beyond our range so that it keeps making progress. | |
363 | * For that we have to include any leading non-uptodate ranges, but | |
364 | * we can skip trailing ones as they will be handled in the next | |
365 | * iteration. | |
366 | */ | |
367 | return pos - orig_pos + plen; | |
72b4daa2 CH |
368 | } |
369 | ||
370 | int | |
371 | iomap_readpage(struct page *page, const struct iomap_ops *ops) | |
372 | { | |
373 | struct iomap_readpage_ctx ctx = { .cur_page = page }; | |
374 | struct inode *inode = page->mapping->host; | |
375 | unsigned poff; | |
376 | loff_t ret; | |
377 | ||
72b4daa2 CH |
378 | for (poff = 0; poff < PAGE_SIZE; poff += ret) { |
379 | ret = iomap_apply(inode, page_offset(page) + poff, | |
380 | PAGE_SIZE - poff, 0, ops, &ctx, | |
381 | iomap_readpage_actor); | |
382 | if (ret <= 0) { | |
383 | WARN_ON_ONCE(ret == 0); | |
384 | SetPageError(page); | |
385 | break; | |
386 | } | |
387 | } | |
388 | ||
389 | if (ctx.bio) { | |
390 | submit_bio(ctx.bio); | |
391 | WARN_ON_ONCE(!ctx.cur_page_in_bio); | |
392 | } else { | |
393 | WARN_ON_ONCE(ctx.cur_page_in_bio); | |
394 | unlock_page(page); | |
395 | } | |
396 | ||
397 | /* | |
398 | * Just like mpage_readpages and block_read_full_page we always | |
399 | * return 0 and just mark the page as PageError on errors. This | |
400 | * should be cleaned up all through the stack eventually. | |
401 | */ | |
402 | return 0; | |
403 | } | |
404 | EXPORT_SYMBOL_GPL(iomap_readpage); | |
405 | ||
406 | static struct page * | |
407 | iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, | |
408 | loff_t length, loff_t *done) | |
409 | { | |
410 | while (!list_empty(pages)) { | |
411 | struct page *page = lru_to_page(pages); | |
412 | ||
413 | if (page_offset(page) >= (u64)pos + length) | |
414 | break; | |
415 | ||
416 | list_del(&page->lru); | |
417 | if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, | |
418 | GFP_NOFS)) | |
419 | return page; | |
420 | ||
421 | /* | |
422 | * If we already have a page in the page cache at index we are | |
423 | * done. Upper layers don't care if it is uptodate after the | |
424 | * readpages call itself as every page gets checked again once | |
425 | * actually needed. | |
426 | */ | |
427 | *done += PAGE_SIZE; | |
428 | put_page(page); | |
429 | } | |
430 | ||
431 | return NULL; | |
432 | } | |
433 | ||
434 | static loff_t | |
435 | iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, | |
436 | void *data, struct iomap *iomap) | |
437 | { | |
438 | struct iomap_readpage_ctx *ctx = data; | |
439 | loff_t done, ret; | |
440 | ||
441 | for (done = 0; done < length; done += ret) { | |
10259de1 | 442 | if (ctx->cur_page && offset_in_page(pos + done) == 0) { |
72b4daa2 CH |
443 | if (!ctx->cur_page_in_bio) |
444 | unlock_page(ctx->cur_page); | |
445 | put_page(ctx->cur_page); | |
446 | ctx->cur_page = NULL; | |
447 | } | |
448 | if (!ctx->cur_page) { | |
449 | ctx->cur_page = iomap_next_page(inode, ctx->pages, | |
450 | pos, length, &done); | |
451 | if (!ctx->cur_page) | |
452 | break; | |
453 | ctx->cur_page_in_bio = false; | |
454 | } | |
455 | ret = iomap_readpage_actor(inode, pos + done, length - done, | |
456 | ctx, iomap); | |
457 | } | |
458 | ||
459 | return done; | |
460 | } | |
461 | ||
462 | int | |
463 | iomap_readpages(struct address_space *mapping, struct list_head *pages, | |
464 | unsigned nr_pages, const struct iomap_ops *ops) | |
465 | { | |
466 | struct iomap_readpage_ctx ctx = { | |
467 | .pages = pages, | |
468 | .is_readahead = true, | |
469 | }; | |
470 | loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); | |
471 | loff_t last = page_offset(list_entry(pages->next, struct page, lru)); | |
472 | loff_t length = last - pos + PAGE_SIZE, ret = 0; | |
473 | ||
474 | while (length > 0) { | |
475 | ret = iomap_apply(mapping->host, pos, length, 0, ops, | |
476 | &ctx, iomap_readpages_actor); | |
477 | if (ret <= 0) { | |
478 | WARN_ON_ONCE(ret == 0); | |
479 | goto done; | |
480 | } | |
481 | pos += ret; | |
482 | length -= ret; | |
483 | } | |
484 | ret = 0; | |
485 | done: | |
486 | if (ctx.bio) | |
487 | submit_bio(ctx.bio); | |
488 | if (ctx.cur_page) { | |
489 | if (!ctx.cur_page_in_bio) | |
490 | unlock_page(ctx.cur_page); | |
491 | put_page(ctx.cur_page); | |
492 | } | |
493 | ||
494 | /* | |
495 | * Check that we didn't lose a page due to the arcane calling | |
496 | * conventions. | |
497 | */ | |
498 | WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); | |
499 | return ret; | |
500 | } | |
501 | EXPORT_SYMBOL_GPL(iomap_readpages); | |
502 | ||
3cc31fa6 ES |
503 | /* |
504 | * iomap_is_partially_uptodate checks whether blocks within a page are | |
505 | * uptodate or not. | |
506 | * | |
507 | * Returns true if all blocks which correspond to a file portion | |
508 | * we want to read within the page are uptodate. | |
509 | */ | |
9dc55f13 CH |
510 | int |
511 | iomap_is_partially_uptodate(struct page *page, unsigned long from, | |
512 | unsigned long count) | |
513 | { | |
514 | struct iomap_page *iop = to_iomap_page(page); | |
515 | struct inode *inode = page->mapping->host; | |
3cc31fa6 | 516 | unsigned len, first, last; |
9dc55f13 CH |
517 | unsigned i; |
518 | ||
3cc31fa6 ES |
519 | /* Limit range to one page */ |
520 | len = min_t(unsigned, PAGE_SIZE - from, count); | |
521 | ||
522 | /* First and last blocks in range within page */ | |
523 | first = from >> inode->i_blkbits; | |
524 | last = (from + len - 1) >> inode->i_blkbits; | |
525 | ||
9dc55f13 CH |
526 | if (iop) { |
527 | for (i = first; i <= last; i++) | |
528 | if (!test_bit(i, iop->uptodate)) | |
529 | return 0; | |
530 | return 1; | |
531 | } | |
532 | ||
533 | return 0; | |
534 | } | |
535 | EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); | |
536 | ||
537 | int | |
538 | iomap_releasepage(struct page *page, gfp_t gfp_mask) | |
539 | { | |
540 | /* | |
541 | * mm accommodates an old ext3 case where clean pages might not have had | |
542 | * the dirty bit cleared. Thus, it can send actual dirty pages to | |
543 | * ->releasepage() via shrink_active_list(), skip those here. | |
544 | */ | |
545 | if (PageDirty(page) || PageWriteback(page)) | |
546 | return 0; | |
547 | iomap_page_release(page); | |
548 | return 1; | |
549 | } | |
550 | EXPORT_SYMBOL_GPL(iomap_releasepage); | |
551 | ||
552 | void | |
553 | iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) | |
554 | { | |
555 | /* | |
556 | * If we are invalidating the entire page, clear the dirty state from it | |
557 | * and release it to avoid unnecessary buildup of the LRU. | |
558 | */ | |
559 | if (offset == 0 && len == PAGE_SIZE) { | |
560 | WARN_ON_ONCE(PageWriteback(page)); | |
561 | cancel_dirty_page(page); | |
562 | iomap_page_release(page); | |
563 | } | |
564 | } | |
565 | EXPORT_SYMBOL_GPL(iomap_invalidatepage); | |
566 | ||
567 | #ifdef CONFIG_MIGRATION | |
568 | int | |
569 | iomap_migrate_page(struct address_space *mapping, struct page *newpage, | |
570 | struct page *page, enum migrate_mode mode) | |
571 | { | |
572 | int ret; | |
573 | ||
ab41ee68 | 574 | ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0); |
9dc55f13 CH |
575 | if (ret != MIGRATEPAGE_SUCCESS) |
576 | return ret; | |
577 | ||
578 | if (page_has_private(page)) { | |
579 | ClearPagePrivate(page); | |
8e47a457 | 580 | get_page(newpage); |
9dc55f13 CH |
581 | set_page_private(newpage, page_private(page)); |
582 | set_page_private(page, 0); | |
8e47a457 | 583 | put_page(page); |
9dc55f13 CH |
584 | SetPagePrivate(newpage); |
585 | } | |
586 | ||
587 | if (mode != MIGRATE_SYNC_NO_COPY) | |
588 | migrate_page_copy(newpage, page); | |
589 | else | |
590 | migrate_page_states(newpage, page); | |
591 | return MIGRATEPAGE_SUCCESS; | |
592 | } | |
593 | EXPORT_SYMBOL_GPL(iomap_migrate_page); | |
594 | #endif /* CONFIG_MIGRATION */ | |
595 | ||
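/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * that uses these helpers might wire them into its address_space
 * operations, in its own source file.  example_iomap_ops and the
 * example_* wrappers are hypothetical; a real implementation also needs
 * writeback methods (->writepage/->writepages), ->direct_IO and so on.
 */
static int example_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &example_iomap_ops);
}

static int example_readpages(struct file *unused,
		struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &example_iomap_ops);
}

static const struct address_space_operations example_aops = {
	.readpage		= example_readpage,
	.readpages		= example_readpages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
#ifdef CONFIG_MIGRATION
	.migratepage		= iomap_migrate_page,
#endif
};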
ae259a9c CH |
596 | static void |
597 | iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) | |
598 | { | |
599 | loff_t i_size = i_size_read(inode); | |
600 | ||
601 | /* | |
602 | * Only truncate newly allocated pages beyond EOF, even if the | |
603 | * write started inside the existing inode size. | |
604 | */ | |
605 | if (pos + len > i_size) | |
606 | truncate_pagecache_range(inode, max(pos, i_size), pos + len); | |
607 | } | |
608 | ||
c03cea42 CH |
609 | static int |
610 | iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page, | |
611 | unsigned poff, unsigned plen, unsigned from, unsigned to, | |
612 | struct iomap *iomap) | |
613 | { | |
614 | struct bio_vec bvec; | |
615 | struct bio bio; | |
616 | ||
617 | if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) { | |
618 | zero_user_segments(page, poff, from, to, poff + plen); | |
9dc55f13 | 619 | iomap_set_range_uptodate(page, poff, plen); |
c03cea42 CH |
620 | return 0; |
621 | } | |
622 | ||
623 | bio_init(&bio, &bvec, 1); | |
624 | bio.bi_opf = REQ_OP_READ; | |
625 | bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); | |
626 | bio_set_dev(&bio, iomap->bdev); | |
627 | __bio_add_page(&bio, page, plen, poff); | |
628 | return submit_bio_wait(&bio); | |
629 | } | |
630 | ||
631 | static int | |
632 | __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, | |
633 | struct page *page, struct iomap *iomap) | |
634 | { | |
9dc55f13 | 635 | struct iomap_page *iop = iomap_page_create(inode, page); |
c03cea42 CH |
636 | loff_t block_size = i_blocksize(inode); |
637 | loff_t block_start = pos & ~(block_size - 1); | |
638 | loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); | |
10259de1 | 639 | unsigned from = offset_in_page(pos), to = from + len, poff, plen; |
9dc55f13 | 640 | int status = 0; |
c03cea42 CH |
641 | |
642 | if (PageUptodate(page)) | |
643 | return 0; | |
9dc55f13 CH |
644 | |
645 | do { | |
646 | iomap_adjust_read_range(inode, iop, &block_start, | |
647 | block_end - block_start, &poff, &plen); | |
648 | if (plen == 0) | |
649 | break; | |
650 | ||
651 | if ((from > poff && from < poff + plen) || | |
652 | (to > poff && to < poff + plen)) { | |
653 | status = iomap_read_page_sync(inode, block_start, page, | |
654 | poff, plen, from, to, iomap); | |
655 | if (status) | |
656 | break; | |
657 | } | |
658 | ||
659 | } while ((block_start += plen) < block_end); | |
660 | ||
661 | return status; | |
c03cea42 CH |
662 | } |
663 | ||
ae259a9c CH |
664 | static int |
665 | iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, | |
666 | struct page **pagep, struct iomap *iomap) | |
667 | { | |
668 | pgoff_t index = pos >> PAGE_SHIFT; | |
669 | struct page *page; | |
670 | int status = 0; | |
671 | ||
672 | BUG_ON(pos + len > iomap->offset + iomap->length); | |
673 | ||
d1908f52 MH |
674 | if (fatal_signal_pending(current)) |
675 | return -EINTR; | |
676 | ||
ae259a9c CH |
677 | page = grab_cache_page_write_begin(inode->i_mapping, index, flags); |
678 | if (!page) | |
679 | return -ENOMEM; | |
680 | ||
19e0c58f AG |
681 | if (iomap->type == IOMAP_INLINE) |
682 | iomap_read_inline_data(inode, page, iomap); | |
c03cea42 | 683 | else if (iomap->flags & IOMAP_F_BUFFER_HEAD) |
19e0c58f | 684 | status = __block_write_begin_int(page, pos, len, NULL, iomap); |
c03cea42 CH |
685 | else |
686 | status = __iomap_write_begin(inode, pos, len, page, iomap); | |
ae259a9c CH |
687 | if (unlikely(status)) { |
688 | unlock_page(page); | |
689 | put_page(page); | |
690 | page = NULL; | |
691 | ||
692 | iomap_write_failed(inode, pos, len); | |
693 | } | |
694 | ||
695 | *pagep = page; | |
696 | return status; | |
697 | } | |
698 | ||
c03cea42 CH |
699 | int |
700 | iomap_set_page_dirty(struct page *page) | |
701 | { | |
702 | struct address_space *mapping = page_mapping(page); | |
703 | int newly_dirty; | |
704 | ||
705 | if (unlikely(!mapping)) | |
706 | return !TestSetPageDirty(page); | |
707 | ||
708 | /* | |
709 | * Lock out page->mem_cgroup migration to keep PageDirty | |
710 | * synchronized with per-memcg dirty page counters. | |
711 | */ | |
712 | lock_page_memcg(page); | |
713 | newly_dirty = !TestSetPageDirty(page); | |
714 | if (newly_dirty) | |
715 | __set_page_dirty(page, mapping, 0); | |
716 | unlock_page_memcg(page); | |
717 | ||
718 | if (newly_dirty) | |
719 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
720 | return newly_dirty; | |
721 | } | |
722 | EXPORT_SYMBOL_GPL(iomap_set_page_dirty); | |
723 | ||
724 | static int | |
725 | __iomap_write_end(struct inode *inode, loff_t pos, unsigned len, | |
726 | unsigned copied, struct page *page, struct iomap *iomap) | |
727 | { | |
728 | flush_dcache_page(page); | |
729 | ||
730 | /* | |
731 | * The blocks that were entirely written will now be uptodate, so we | |
732 | * don't have to worry about a readpage reading them and overwriting a | |
733 | * partial write. However if we have encountered a short write and only | |
734 | * partially written into a block, it will not be marked uptodate, so a | |
735 | * readpage might come in and destroy our partial write. | |
736 | * | |
737 | * Do the simplest thing, and just treat any short write to a non | |
738 | * uptodate page as a zero-length write, and force the caller to redo | |
739 | * the whole thing. | |
740 | */ | |
741 | if (unlikely(copied < len && !PageUptodate(page))) { | |
742 | copied = 0; | |
743 | } else { | |
10259de1 | 744 | iomap_set_range_uptodate(page, offset_in_page(pos), len); |
c03cea42 CH |
745 | iomap_set_page_dirty(page); |
746 | } | |
747 | return __generic_write_end(inode, pos, copied, page); | |
748 | } | |
749 | ||
19e0c58f AG |
750 | static int |
751 | iomap_write_end_inline(struct inode *inode, struct page *page, | |
752 | struct iomap *iomap, loff_t pos, unsigned copied) | |
753 | { | |
754 | void *addr; | |
755 | ||
756 | WARN_ON_ONCE(!PageUptodate(page)); | |
757 | BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
758 | ||
759 | addr = kmap_atomic(page); | |
760 | memcpy(iomap->inline_data + pos, addr + pos, copied); | |
761 | kunmap_atomic(addr); | |
762 | ||
763 | mark_inode_dirty(inode); | |
764 | __generic_write_end(inode, pos, copied, page); | |
765 | return copied; | |
766 | } | |
767 | ||
ae259a9c CH |
768 | static int |
769 | iomap_write_end(struct inode *inode, loff_t pos, unsigned len, | |
19e0c58f | 770 | unsigned copied, struct page *page, struct iomap *iomap) |
ae259a9c CH |
771 | { |
772 | int ret; | |
773 | ||
19e0c58f AG |
774 | if (iomap->type == IOMAP_INLINE) { |
775 | ret = iomap_write_end_inline(inode, page, iomap, pos, copied); | |
c03cea42 | 776 | } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
19e0c58f AG |
777 | ret = generic_write_end(NULL, inode->i_mapping, pos, len, |
778 | copied, page, NULL); | |
c03cea42 CH |
779 | } else { |
780 | ret = __iomap_write_end(inode, pos, len, copied, page, iomap); | |
19e0c58f AG |
781 | } |
782 | ||
63899c6f CH |
783 | if (iomap->page_done) |
784 | iomap->page_done(inode, pos, copied, page, iomap); | |
785 | ||
ae259a9c CH |
786 | if (ret < len) |
787 | iomap_write_failed(inode, pos, len); | |
788 | return ret; | |
789 | } | |
790 | ||
791 | static loff_t | |
792 | iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
793 | struct iomap *iomap) | |
794 | { | |
795 | struct iov_iter *i = data; | |
796 | long status = 0; | |
797 | ssize_t written = 0; | |
798 | unsigned int flags = AOP_FLAG_NOFS; | |
799 | ||
ae259a9c CH |
800 | do { |
801 | struct page *page; | |
802 | unsigned long offset; /* Offset into pagecache page */ | |
803 | unsigned long bytes; /* Bytes to write to page */ | |
804 | size_t copied; /* Bytes copied from user */ | |
805 | ||
10259de1 | 806 | offset = offset_in_page(pos); |
ae259a9c CH |
807 | bytes = min_t(unsigned long, PAGE_SIZE - offset, |
808 | iov_iter_count(i)); | |
809 | again: | |
810 | if (bytes > length) | |
811 | bytes = length; | |
812 | ||
813 | /* | |
814 | * Bring in the user page that we will copy from _first_. | |
815 | * Otherwise there's a nasty deadlock on copying from the | |
816 | * same page as we're writing to, without it being marked | |
817 | * up-to-date. | |
818 | * | |
819 | * Not only is this an optimisation, but it is also required | |
820 | * to check that the address is actually valid, when atomic | |
821 | * usercopies are used, below. | |
822 | */ | |
823 | if (unlikely(iov_iter_fault_in_readable(i, bytes))) { | |
824 | status = -EFAULT; | |
825 | break; | |
826 | } | |
827 | ||
828 | status = iomap_write_begin(inode, pos, bytes, flags, &page, | |
829 | iomap); | |
830 | if (unlikely(status)) | |
831 | break; | |
832 | ||
833 | if (mapping_writably_mapped(inode->i_mapping)) | |
834 | flush_dcache_page(page); | |
835 | ||
ae259a9c | 836 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); |
ae259a9c CH |
837 | |
838 | flush_dcache_page(page); | |
ae259a9c | 839 | |
19e0c58f AG |
840 | status = iomap_write_end(inode, pos, bytes, copied, page, |
841 | iomap); | |
ae259a9c CH |
842 | if (unlikely(status < 0)) |
843 | break; | |
844 | copied = status; | |
845 | ||
846 | cond_resched(); | |
847 | ||
848 | iov_iter_advance(i, copied); | |
849 | if (unlikely(copied == 0)) { | |
850 | /* | |
851 | * If we were unable to copy any data at all, we must | |
852 | * fall back to a single segment length write. | |
853 | * | |
854 | * If we didn't fall back here, we could livelock | |
855 | * because not all segments in the iov can be copied at | |
856 | * once without a pagefault. | |
857 | */ | |
858 | bytes = min_t(unsigned long, PAGE_SIZE - offset, | |
859 | iov_iter_single_seg_count(i)); | |
860 | goto again; | |
861 | } | |
862 | pos += copied; | |
863 | written += copied; | |
864 | length -= copied; | |
865 | ||
866 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
867 | } while (iov_iter_count(i) && length); | |
868 | ||
869 | return written ? written : status; | |
870 | } | |
871 | ||
872 | ssize_t | |
873 | iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, | |
8ff6daa1 | 874 | const struct iomap_ops *ops) |
ae259a9c CH |
875 | { |
876 | struct inode *inode = iocb->ki_filp->f_mapping->host; | |
877 | loff_t pos = iocb->ki_pos, ret = 0, written = 0; | |
878 | ||
879 | while (iov_iter_count(iter)) { | |
880 | ret = iomap_apply(inode, pos, iov_iter_count(iter), | |
881 | IOMAP_WRITE, ops, iter, iomap_write_actor); | |
882 | if (ret <= 0) | |
883 | break; | |
884 | pos += ret; | |
885 | written += ret; | |
886 | } | |
887 | ||
888 | return written ? written : ret; | |
889 | } | |
890 | EXPORT_SYMBOL_GPL(iomap_file_buffered_write); | |
891 | ||
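/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->write_iter that feeds the buffered-write path above.
 * example_iomap_ops is hypothetical; a real filesystem also handles
 * O_DIRECT, permission/timestamp updates and its own locking model here.
 */
static ssize_t
example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}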
5f4e5752 CH |
892 | static struct page * |
893 | __iomap_read_page(struct inode *inode, loff_t offset) | |
894 | { | |
895 | struct address_space *mapping = inode->i_mapping; | |
896 | struct page *page; | |
897 | ||
898 | page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); | |
899 | if (IS_ERR(page)) | |
900 | return page; | |
901 | if (!PageUptodate(page)) { | |
902 | put_page(page); | |
903 | return ERR_PTR(-EIO); | |
904 | } | |
905 | return page; | |
906 | } | |
907 | ||
908 | static loff_t | |
909 | iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
910 | struct iomap *iomap) | |
911 | { | |
912 | long status = 0; | |
913 | ssize_t written = 0; | |
914 | ||
915 | do { | |
916 | struct page *page, *rpage; | |
917 | unsigned long offset; /* Offset into pagecache page */ | |
918 | unsigned long bytes; /* Bytes to write to page */ | |
919 | ||
10259de1 | 920 | offset = offset_in_page(pos); |
e28ae8e4 | 921 | bytes = min_t(loff_t, PAGE_SIZE - offset, length); |
5f4e5752 CH |
922 | |
923 | rpage = __iomap_read_page(inode, pos); | |
924 | if (IS_ERR(rpage)) | |
925 | return PTR_ERR(rpage); | |
926 | ||
927 | status = iomap_write_begin(inode, pos, bytes, | |
c718a975 | 928 | AOP_FLAG_NOFS, &page, iomap); |
5f4e5752 CH |
929 | put_page(rpage); |
930 | if (unlikely(status)) | |
931 | return status; | |
932 | ||
933 | WARN_ON_ONCE(!PageUptodate(page)); | |
934 | ||
19e0c58f | 935 | status = iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
5f4e5752 CH |
936 | if (unlikely(status <= 0)) { |
937 | if (WARN_ON_ONCE(status == 0)) | |
938 | return -EIO; | |
939 | return status; | |
940 | } | |
941 | ||
942 | cond_resched(); | |
943 | ||
944 | pos += status; | |
945 | written += status; | |
946 | length -= status; | |
947 | ||
948 | balance_dirty_pages_ratelimited(inode->i_mapping); | |
949 | } while (length); | |
950 | ||
951 | return written; | |
952 | } | |
953 | ||
954 | int | |
955 | iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len, | |
8ff6daa1 | 956 | const struct iomap_ops *ops) |
5f4e5752 CH |
957 | { |
958 | loff_t ret; | |
959 | ||
960 | while (len) { | |
961 | ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, | |
962 | iomap_dirty_actor); | |
963 | if (ret <= 0) | |
964 | return ret; | |
965 | pos += ret; | |
966 | len -= ret; | |
967 | } | |
968 | ||
969 | return 0; | |
970 | } | |
971 | EXPORT_SYMBOL_GPL(iomap_file_dirty); | |
972 | ||
ae259a9c CH |
973 | static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, |
974 | unsigned bytes, struct iomap *iomap) | |
975 | { | |
976 | struct page *page; | |
977 | int status; | |
978 | ||
c718a975 TH |
979 | status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page, |
980 | iomap); | |
ae259a9c CH |
981 | if (status) |
982 | return status; | |
983 | ||
984 | zero_user(page, offset, bytes); | |
985 | mark_page_accessed(page); | |
986 | ||
19e0c58f | 987 | return iomap_write_end(inode, pos, bytes, bytes, page, iomap); |
ae259a9c CH |
988 | } |
989 | ||
9a286f0e CH |
990 | static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, |
991 | struct iomap *iomap) | |
992 | { | |
57fc505d CH |
993 | return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, |
994 | iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); | |
9a286f0e CH |
995 | } |
996 | ||
ae259a9c CH |
997 | static loff_t |
998 | iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, | |
999 | void *data, struct iomap *iomap) | |
1000 | { | |
1001 | bool *did_zero = data; | |
1002 | loff_t written = 0; | |
1003 | int status; | |
1004 | ||
1005 | /* already zeroed? we're done. */ | |
1006 | if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) | |
1007 | return count; | |
1008 | ||
1009 | do { | |
1010 | unsigned offset, bytes; | |
1011 | ||
10259de1 | 1012 | offset = offset_in_page(pos); |
e28ae8e4 | 1013 | bytes = min_t(loff_t, PAGE_SIZE - offset, count); |
ae259a9c | 1014 | |
9a286f0e CH |
1015 | if (IS_DAX(inode)) |
1016 | status = iomap_dax_zero(pos, offset, bytes, iomap); | |
1017 | else | |
1018 | status = iomap_zero(inode, pos, offset, bytes, iomap); | |
ae259a9c CH |
1019 | if (status < 0) |
1020 | return status; | |
1021 | ||
1022 | pos += bytes; | |
1023 | count -= bytes; | |
1024 | written += bytes; | |
1025 | if (did_zero) | |
1026 | *did_zero = true; | |
1027 | } while (count > 0); | |
1028 | ||
1029 | return written; | |
1030 | } | |
1031 | ||
1032 | int | |
1033 | iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, | |
8ff6daa1 | 1034 | const struct iomap_ops *ops) |
ae259a9c CH |
1035 | { |
1036 | loff_t ret; | |
1037 | ||
1038 | while (len > 0) { | |
1039 | ret = iomap_apply(inode, pos, len, IOMAP_ZERO, | |
1040 | ops, did_zero, iomap_zero_range_actor); | |
1041 | if (ret <= 0) | |
1042 | return ret; | |
1043 | ||
1044 | pos += ret; | |
1045 | len -= ret; | |
1046 | } | |
1047 | ||
1048 | return 0; | |
1049 | } | |
1050 | EXPORT_SYMBOL_GPL(iomap_zero_range); | |
1051 | ||
1052 | int | |
1053 | iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | |
8ff6daa1 | 1054 | const struct iomap_ops *ops) |
ae259a9c | 1055 | { |
93407472 FF |
1056 | unsigned int blocksize = i_blocksize(inode); |
1057 | unsigned int off = pos & (blocksize - 1); | |
ae259a9c CH |
1058 | |
1059 | /* Block boundary? Nothing to do */ | |
1060 | if (!off) | |
1061 | return 0; | |
1062 | return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); | |
1063 | } | |
1064 | EXPORT_SYMBOL_GPL(iomap_truncate_page); | |
1065 | ||
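/*
 * Illustrative sketch (not part of the original file): when a file is
 * truncated, a filesystem typically zeroes the now-partial tail block
 * with iomap_truncate_page() before changing i_size, so that later reads
 * of the last page do not see stale bytes beyond EOF.  example_iomap_ops
 * and example_truncate_tail are hypothetical names.
 */
static int example_truncate_tail(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;

	return iomap_truncate_page(inode, newsize, &did_zero,
			&example_iomap_ops);
}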
1066 | static loff_t | |
1067 | iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, | |
1068 | void *data, struct iomap *iomap) | |
1069 | { | |
1070 | struct page *page = data; | |
1071 | int ret; | |
1072 | ||
c03cea42 CH |
1073 | if (iomap->flags & IOMAP_F_BUFFER_HEAD) { |
1074 | ret = __block_write_begin_int(page, pos, length, NULL, iomap); | |
1075 | if (ret) | |
1076 | return ret; | |
1077 | block_commit_write(page, 0, length); | |
1078 | } else { | |
1079 | WARN_ON_ONCE(!PageUptodate(page)); | |
9dc55f13 | 1080 | iomap_page_create(inode, page); |
561295a3 | 1081 | set_page_dirty(page); |
c03cea42 | 1082 | } |
ae259a9c | 1083 | |
ae259a9c CH |
1084 | return length; |
1085 | } | |
1086 | ||
5780a02f | 1087 | vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) |
ae259a9c CH |
1088 | { |
1089 | struct page *page = vmf->page; | |
11bac800 | 1090 | struct inode *inode = file_inode(vmf->vma->vm_file); |
ae259a9c CH |
1091 | unsigned long length; |
1092 | loff_t offset, size; | |
1093 | ssize_t ret; | |
1094 | ||
1095 | lock_page(page); | |
1096 | size = i_size_read(inode); | |
1097 | if ((page->mapping != inode->i_mapping) || | |
1098 | (page_offset(page) > size)) { | |
1099 | /* We overload EFAULT to mean page got truncated */ | |
1100 | ret = -EFAULT; | |
1101 | goto out_unlock; | |
1102 | } | |
1103 | ||
1104 | /* page is wholly or partially inside EOF */ | |
1105 | if (((page->index + 1) << PAGE_SHIFT) > size) | |
10259de1 | 1106 | length = offset_in_page(size); |
ae259a9c CH |
1107 | else |
1108 | length = PAGE_SIZE; | |
1109 | ||
1110 | offset = page_offset(page); | |
1111 | while (length > 0) { | |
9484ab1b JK |
1112 | ret = iomap_apply(inode, offset, length, |
1113 | IOMAP_WRITE | IOMAP_FAULT, ops, page, | |
1114 | iomap_page_mkwrite_actor); | |
ae259a9c CH |
1115 | if (unlikely(ret <= 0)) |
1116 | goto out_unlock; | |
1117 | offset += ret; | |
1118 | length -= ret; | |
1119 | } | |
1120 | ||
ae259a9c | 1121 | wait_for_stable_page(page); |
e7647fb4 | 1122 | return VM_FAULT_LOCKED; |
ae259a9c CH |
1123 | out_unlock: |
1124 | unlock_page(page); | |
e7647fb4 | 1125 | return block_page_mkwrite_return(ret); |
ae259a9c CH |
1126 | } |
1127 | EXPORT_SYMBOL_GPL(iomap_page_mkwrite); | |
8be9f564 CH |
1128 | |
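/*
 * Illustrative sketch (not part of the original file): wiring
 * iomap_page_mkwrite() into a filesystem's vm_operations so that mmap
 * writes allocate blocks and dirty the page through the iomap path.
 * example_iomap_ops and the example_* names are hypothetical; a real
 * implementation usually also brackets the fault with
 * sb_start_pagefault()/sb_end_pagefault() and updates the file time.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	return iomap_page_mkwrite(vmf, &example_iomap_ops);
}

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= example_page_mkwrite,
};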
1129 | struct fiemap_ctx { | |
1130 | struct fiemap_extent_info *fi; | |
1131 | struct iomap prev; | |
1132 | }; | |
1133 | ||
1134 | static int iomap_to_fiemap(struct fiemap_extent_info *fi, | |
1135 | struct iomap *iomap, u32 flags) | |
1136 | { | |
1137 | switch (iomap->type) { | |
1138 | case IOMAP_HOLE: | |
1139 | /* skip holes */ | |
1140 | return 0; | |
1141 | case IOMAP_DELALLOC: | |
1142 | flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN; | |
1143 | break; | |
19319b53 CH |
1144 | case IOMAP_MAPPED: |
1145 | break; | |
8be9f564 CH |
1146 | case IOMAP_UNWRITTEN: |
1147 | flags |= FIEMAP_EXTENT_UNWRITTEN; | |
1148 | break; | |
19319b53 CH |
1149 | case IOMAP_INLINE: |
1150 | flags |= FIEMAP_EXTENT_DATA_INLINE; | |
8be9f564 CH |
1151 | break; |
1152 | } | |
1153 | ||
17de0a9f CH |
1154 | if (iomap->flags & IOMAP_F_MERGED) |
1155 | flags |= FIEMAP_EXTENT_MERGED; | |
e43c460d DW |
1156 | if (iomap->flags & IOMAP_F_SHARED) |
1157 | flags |= FIEMAP_EXTENT_SHARED; | |
17de0a9f | 1158 | |
8be9f564 | 1159 | return fiemap_fill_next_extent(fi, iomap->offset, |
19fe5f64 | 1160 | iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0, |
17de0a9f | 1161 | iomap->length, flags); |
8be9f564 CH |
1162 | } |
1163 | ||
1164 | static loff_t | |
1165 | iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, | |
1166 | struct iomap *iomap) | |
1167 | { | |
1168 | struct fiemap_ctx *ctx = data; | |
1169 | loff_t ret = length; | |
1170 | ||
1171 | if (iomap->type == IOMAP_HOLE) | |
1172 | return length; | |
1173 | ||
1174 | ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0); | |
1175 | ctx->prev = *iomap; | |
1176 | switch (ret) { | |
1177 | case 0: /* success */ | |
1178 | return length; | |
1179 | case 1: /* extent array full */ | |
1180 | return 0; | |
1181 | default: | |
1182 | return ret; | |
1183 | } | |
1184 | } | |
1185 | ||
1186 | int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, | |
8ff6daa1 | 1187 | loff_t start, loff_t len, const struct iomap_ops *ops) |
8be9f564 CH |
1188 | { |
1189 | struct fiemap_ctx ctx; | |
1190 | loff_t ret; | |
1191 | ||
1192 | memset(&ctx, 0, sizeof(ctx)); | |
1193 | ctx.fi = fi; | |
1194 | ctx.prev.type = IOMAP_HOLE; | |
1195 | ||
1196 | ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC); | |
1197 | if (ret) | |
1198 | return ret; | |
1199 | ||
8896b8f6 DC |
1200 | if (fi->fi_flags & FIEMAP_FLAG_SYNC) { |
1201 | ret = filemap_write_and_wait(inode->i_mapping); | |
1202 | if (ret) | |
1203 | return ret; | |
1204 | } | |
8be9f564 CH |
1205 | |
1206 | while (len > 0) { | |
d33fd776 | 1207 | ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx, |
8be9f564 | 1208 | iomap_fiemap_actor); |
ac2dc058 DC |
1209 | /* inode with no (attribute) mapping will give ENOENT */ |
1210 | if (ret == -ENOENT) | |
1211 | break; | |
8be9f564 CH |
1212 | if (ret < 0) |
1213 | return ret; | |
1214 | if (ret == 0) | |
1215 | break; | |
1216 | ||
1217 | start += ret; | |
1218 | len -= ret; | |
1219 | } | |
1220 | ||
1221 | if (ctx.prev.type != IOMAP_HOLE) { | |
1222 | ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST); | |
1223 | if (ret < 0) | |
1224 | return ret; | |
1225 | } | |
1226 | ||
1227 | return 0; | |
1228 | } | |
1229 | EXPORT_SYMBOL_GPL(iomap_fiemap); | |
ff6a9292 | 1230 | |
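/*
 * Illustrative sketch (not part of the original file): iomap_fiemap() is
 * meant to back a filesystem's ->fiemap inode operation.  example_fiemap
 * and example_iomap_ops are hypothetical names.
 */
static int example_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &example_iomap_ops);
}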
8a78cb1f CH |
1231 | /* |
1232 | * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff. | |
afd9d6a1 | 1233 | * Returns true if found and updates @lastoff to the offset in file. |
8a78cb1f | 1234 | */ |
afd9d6a1 CH |
1235 | static bool |
1236 | page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff, | |
1237 | int whence) | |
8a78cb1f | 1238 | { |
afd9d6a1 CH |
1239 | const struct address_space_operations *ops = inode->i_mapping->a_ops; |
1240 | unsigned int bsize = i_blocksize(inode), off; | |
8a78cb1f | 1241 | bool seek_data = whence == SEEK_DATA; |
afd9d6a1 | 1242 | loff_t poff = page_offset(page); |
8a78cb1f | 1243 | |
afd9d6a1 CH |
1244 | if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE)) |
1245 | return false; | |
8a78cb1f | 1246 | |
afd9d6a1 | 1247 | if (*lastoff < poff) { |
8a78cb1f | 1248 | /* |
afd9d6a1 CH |
1249 | * Last offset smaller than the start of the page means we found |
1250 | * a hole: | |
8a78cb1f | 1251 | */ |
afd9d6a1 CH |
1252 | if (whence == SEEK_HOLE) |
1253 | return true; | |
1254 | *lastoff = poff; | |
1255 | } | |
8a78cb1f | 1256 | |
afd9d6a1 CH |
1257 | /* |
1258 | * Just check the page unless we can and should check block ranges: | |
1259 | */ | |
1260 | if (bsize == PAGE_SIZE || !ops->is_partially_uptodate) | |
1261 | return PageUptodate(page) == seek_data; | |
1262 | ||
1263 | lock_page(page); | |
1264 | if (unlikely(page->mapping != inode->i_mapping)) | |
1265 | goto out_unlock_not_found; | |
1266 | ||
1267 | for (off = 0; off < PAGE_SIZE; off += bsize) { | |
10259de1 | 1268 | if (offset_in_page(*lastoff) >= off + bsize) |
afd9d6a1 CH |
1269 | continue; |
1270 | if (ops->is_partially_uptodate(page, off, bsize) == seek_data) { | |
1271 | unlock_page(page); | |
1272 | return true; | |
1273 | } | |
1274 | *lastoff = poff + off + bsize; | |
1275 | } | |
1276 | ||
1277 | out_unlock_not_found: | |
1278 | unlock_page(page); | |
1279 | return false; | |
8a78cb1f CH |
1280 | } |
1281 | ||
1282 | /* | |
1283 | * Seek for SEEK_DATA / SEEK_HOLE in the page cache. | |
1284 | * | |
1285 | * Within unwritten extents, the page cache determines which parts are holes | |
bd56b3e1 CH |
1286 | * and which are data: uptodate buffer heads count as data; everything else |
1287 | * counts as a hole. | |
8a78cb1f CH |
1288 | * |
1289 | * Returns the resulting offset on success, and -ENOENT otherwise. | |
1290 | */ | |
1291 | static loff_t | |
1292 | page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length, | |
1293 | int whence) | |
1294 | { | |
1295 | pgoff_t index = offset >> PAGE_SHIFT; | |
1296 | pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE); | |
1297 | loff_t lastoff = offset; | |
1298 | struct pagevec pvec; | |
1299 | ||
1300 | if (length <= 0) | |
1301 | return -ENOENT; | |
1302 | ||
1303 | pagevec_init(&pvec); | |
1304 | ||
1305 | do { | |
1306 | unsigned nr_pages, i; | |
1307 | ||
1308 | nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index, | |
1309 | end - 1); | |
1310 | if (nr_pages == 0) | |
1311 | break; | |
1312 | ||
1313 | for (i = 0; i < nr_pages; i++) { | |
1314 | struct page *page = pvec.pages[i]; | |
1315 | ||
afd9d6a1 | 1316 | if (page_seek_hole_data(inode, page, &lastoff, whence)) |
8a78cb1f | 1317 | goto check_range; |
8a78cb1f CH |
1318 | lastoff = page_offset(page) + PAGE_SIZE; |
1319 | } | |
1320 | pagevec_release(&pvec); | |
1321 | } while (index < end); | |
1322 | ||
1323 | /* If there is no page at lastoff and we are not done, we found a hole. */ | |
1324 | if (whence != SEEK_HOLE) | |
1325 | goto not_found; | |
1326 | ||
1327 | check_range: | |
1328 | if (lastoff < offset + length) | |
1329 | goto out; | |
1330 | not_found: | |
1331 | lastoff = -ENOENT; | |
1332 | out: | |
1333 | pagevec_release(&pvec); | |
1334 | return lastoff; | |
1335 | } | |
1336 | ||
1337 | ||
0ed3b0d4 AG |
1338 | static loff_t |
1339 | iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, | |
1340 | void *data, struct iomap *iomap) | |
1341 | { | |
1342 | switch (iomap->type) { | |
1343 | case IOMAP_UNWRITTEN: | |
1344 | offset = page_cache_seek_hole_data(inode, offset, length, | |
1345 | SEEK_HOLE); | |
1346 | if (offset < 0) | |
1347 | return length; | |
1348 | /* fall through */ | |
1349 | case IOMAP_HOLE: | |
1350 | *(loff_t *)data = offset; | |
1351 | return 0; | |
1352 | default: | |
1353 | return length; | |
1354 | } | |
1355 | } | |
1356 | ||
1357 | loff_t | |
1358 | iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops) | |
1359 | { | |
1360 | loff_t size = i_size_read(inode); | |
1361 | loff_t length = size - offset; | |
1362 | loff_t ret; | |
1363 | ||
d6ab17f2 DW |
1364 | /* Nothing to be found before or beyond the end of the file. */ |
1365 | if (offset < 0 || offset >= size) | |
0ed3b0d4 AG |
1366 | return -ENXIO; |
1367 | ||
1368 | while (length > 0) { | |
1369 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, | |
1370 | &offset, iomap_seek_hole_actor); | |
1371 | if (ret < 0) | |
1372 | return ret; | |
1373 | if (ret == 0) | |
1374 | break; | |
1375 | ||
1376 | offset += ret; | |
1377 | length -= ret; | |
1378 | } | |
1379 | ||
1380 | return offset; | |
1381 | } | |
1382 | EXPORT_SYMBOL_GPL(iomap_seek_hole); | |
1383 | ||
1384 | static loff_t | |
1385 | iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, | |
1386 | void *data, struct iomap *iomap) | |
1387 | { | |
1388 | switch (iomap->type) { | |
1389 | case IOMAP_HOLE: | |
1390 | return length; | |
1391 | case IOMAP_UNWRITTEN: | |
1392 | offset = page_cache_seek_hole_data(inode, offset, length, | |
1393 | SEEK_DATA); | |
1394 | if (offset < 0) | |
1395 | return length; | |
1396 | /*FALLTHRU*/ | |
1397 | default: | |
1398 | *(loff_t *)data = offset; | |
1399 | return 0; | |
1400 | } | |
1401 | } | |
1402 | ||
1403 | loff_t | |
1404 | iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops) | |
1405 | { | |
1406 | loff_t size = i_size_read(inode); | |
1407 | loff_t length = size - offset; | |
1408 | loff_t ret; | |
1409 | ||
d6ab17f2 DW |
1410 | /* Nothing to be found before or beyond the end of the file. */ |
1411 | if (offset < 0 || offset >= size) | |
0ed3b0d4 AG |
1412 | return -ENXIO; |
1413 | ||
1414 | while (length > 0) { | |
1415 | ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops, | |
1416 | &offset, iomap_seek_data_actor); | |
1417 | if (ret < 0) | |
1418 | return ret; | |
1419 | if (ret == 0) | |
1420 | break; | |
1421 | ||
1422 | offset += ret; | |
1423 | length -= ret; | |
1424 | } | |
1425 | ||
1426 | if (length <= 0) | |
1427 | return -ENXIO; | |
1428 | return offset; | |
1429 | } | |
1430 | EXPORT_SYMBOL_GPL(iomap_seek_data); | |
1431 | ||
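/*
 * Illustrative sketch (not part of the original file): a ->llseek method
 * that handles SEEK_HOLE/SEEK_DATA through the helpers above and falls
 * back to generic_file_llseek() for the other whence values.
 * example_iomap_ops and example_file_llseek are hypothetical names.
 */
static loff_t example_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &example_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &example_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}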
ff6a9292 CH |
1432 | /* |
1433 | * Private flags for iomap_dio, must not overlap with the public ones in | |
1434 | * iomap.h: | |
1435 | */ | |
3460cac1 | 1436 | #define IOMAP_DIO_WRITE_FUA (1 << 28) |
4f8ff44b | 1437 | #define IOMAP_DIO_NEED_SYNC (1 << 29) |
ff6a9292 CH |
1438 | #define IOMAP_DIO_WRITE (1 << 30) |
1439 | #define IOMAP_DIO_DIRTY (1 << 31) | |
1440 | ||
1441 | struct iomap_dio { | |
1442 | struct kiocb *iocb; | |
1443 | iomap_dio_end_io_t *end_io; | |
1444 | loff_t i_size; | |
1445 | loff_t size; | |
1446 | atomic_t ref; | |
1447 | unsigned flags; | |
1448 | int error; | |
ebf00be3 | 1449 | bool wait_for_completion; |
ff6a9292 CH |
1450 | |
1451 | union { | |
1452 | /* used during submission and for synchronous completion: */ | |
1453 | struct { | |
1454 | struct iov_iter *iter; | |
1455 | struct task_struct *waiter; | |
1456 | struct request_queue *last_queue; | |
1457 | blk_qc_t cookie; | |
1458 | } submit; | |
1459 | ||
1460 | /* used for aio completion: */ | |
1461 | struct { | |
1462 | struct work_struct work; | |
1463 | } aio; | |
1464 | }; | |
1465 | }; | |
1466 | ||
81214bab CH |
1467 | int iomap_dio_iopoll(struct kiocb *kiocb, bool spin) |
1468 | { | |
1469 | struct request_queue *q = READ_ONCE(kiocb->private); | |
1470 | ||
1471 | if (!q) | |
1472 | return 0; | |
1473 | return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin); | |
1474 | } | |
1475 | EXPORT_SYMBOL_GPL(iomap_dio_iopoll); | |
1476 | ||
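/*
 * Usage note (an assumption about typical callers, not taken from this
 * file): a filesystem that issues direct I/O through this code can point
 * its file_operations at the helper above, e.g.
 *
 *	.iopoll		= iomap_dio_iopoll,
 *
 * so that IOCB_HIPRI requests are polled.  iomap_dio_iopoll() reads back
 * the request queue and cookie that the direct I/O submission path is
 * expected to have stored in iocb->private and iocb->ki_cookie.
 */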
1477 | static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap, | |
1478 | struct bio *bio) | |
1479 | { | |
1480 | atomic_inc(&dio->ref); | |
1481 | ||
1482 | if (dio->iocb->ki_flags & IOCB_HIPRI) | |
1483 | bio_set_polled(bio, dio->iocb); | |
1484 | ||
1485 | dio->submit.last_queue = bdev_get_queue(iomap->bdev); | |
1486 | dio->submit.cookie = submit_bio(bio); | |
1487 | } | |
1488 | ||
ff6a9292 CH |
1489 | static ssize_t iomap_dio_complete(struct iomap_dio *dio) |
1490 | { | |
1491 | struct kiocb *iocb = dio->iocb; | |
332391a9 | 1492 | struct inode *inode = file_inode(iocb->ki_filp); |
5e25c269 | 1493 | loff_t offset = iocb->ki_pos; |
ff6a9292 CH |
1494 | ssize_t ret; |
1495 | ||
1496 | if (dio->end_io) { | |
1497 | ret = dio->end_io(iocb, | |
1498 | dio->error ? dio->error : dio->size, | |
1499 | dio->flags); | |
1500 | } else { | |
1501 | ret = dio->error; | |
1502 | } | |
1503 | ||
1504 | if (likely(!ret)) { | |
1505 | ret = dio->size; | |
1506 | /* check for short read */ | |
5e25c269 | 1507 | if (offset + ret > dio->i_size && |
ff6a9292 | 1508 | !(dio->flags & IOMAP_DIO_WRITE)) |
5e25c269 | 1509 | ret = dio->i_size - offset; |
ff6a9292 CH |
1510 | iocb->ki_pos += ret; |
1511 | } | |
1512 | ||
5e25c269 EG |
1513 | /* |
1514 | * Try again to invalidate clean pages which might have been cached by | |
1515 | * non-direct readahead, or faulted in by get_user_pages() if the source | |
1516 | * of the write was an mmap'ed region of the file we're writing. Either | |
1517 | * one is a pretty crazy thing to do, so we don't support it 100%. If | |
1518 | * this invalidation fails, tough, the write still worked... | |
1519 | * | |
1520 | * And this page cache invalidation has to be after dio->end_io(), as | |
1521 | * some filesystems convert unwritten extents to real allocations in | |
1522 | * end_io() when necessary, otherwise a racing buffer read would cache | |
1523 | * zeros from unwritten extents. | |
1524 | */ | |
1525 | if (!dio->error && | |
1526 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { | |
1527 | int err; | |
1528 | err = invalidate_inode_pages2_range(inode->i_mapping, | |
1529 | offset >> PAGE_SHIFT, | |
1530 | (offset + dio->size - 1) >> PAGE_SHIFT); | |
5a9d929d DW |
1531 | if (err) |
1532 | dio_warn_stale_pagecache(iocb->ki_filp); | |
5e25c269 EG |
1533 | } |
1534 | ||
4f8ff44b DC |
1535 | /* |
1536 | * If this is a DSYNC write, make sure we push it to stable storage now | |
1537 | * that we've written data. | |
1538 | */ | |
1539 | if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC)) | |
1540 | ret = generic_write_sync(iocb, ret); | |
1541 | ||
ff6a9292 CH |
1542 | inode_dio_end(file_inode(iocb->ki_filp)); |
1543 | kfree(dio); | |
1544 | ||
1545 | return ret; | |
1546 | } | |
1547 | ||
1548 | static void iomap_dio_complete_work(struct work_struct *work) | |
1549 | { | |
1550 | struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work); | |
1551 | struct kiocb *iocb = dio->iocb; | |
ff6a9292 | 1552 | |
4f8ff44b | 1553 | iocb->ki_complete(iocb, iomap_dio_complete(dio), 0); |
ff6a9292 CH |
1554 | } |
1555 | ||
1556 | /* | |
1557 | * Set an error in the dio if none is set yet. We have to use cmpxchg | |
1558 | * as the submission context and the completion context(s) can race to | |
1559 | * update the error. | |
1560 | */ | |
1561 | static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret) | |
1562 | { | |
1563 | cmpxchg(&dio->error, 0, ret); | |
1564 | } | |
1565 | ||
1566 | static void iomap_dio_bio_end_io(struct bio *bio) | |
1567 | { | |
1568 | struct iomap_dio *dio = bio->bi_private; | |
1569 | bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY); | |
1570 | ||
4e4cbee9 CH |
1571 | if (bio->bi_status) |
1572 | iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status)); | |
ff6a9292 CH |
1573 | |
1574 | if (atomic_dec_and_test(&dio->ref)) { | |
ebf00be3 | 1575 | if (dio->wait_for_completion) { |
ff6a9292 | 1576 | struct task_struct *waiter = dio->submit.waiter; |
ff6a9292 | 1577 | WRITE_ONCE(dio->submit.waiter, NULL); |
0619317f | 1578 | blk_wake_io_task(waiter); |
ff6a9292 CH |
1579 | } else if (dio->flags & IOMAP_DIO_WRITE) { |
1580 | struct inode *inode = file_inode(dio->iocb->ki_filp); | |
1581 | ||
1582 | INIT_WORK(&dio->aio.work, iomap_dio_complete_work); | |
1583 | queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); | |
1584 | } else { | |
1585 | iomap_dio_complete_work(&dio->aio.work); | |
1586 | } | |
1587 | } | |
1588 | ||
1589 | if (should_dirty) { | |
1590 | bio_check_pages_dirty(bio); | |
1591 | } else { | |
1592 | struct bio_vec *bvec; | |
1593 | int i; | |
6dc4f100 | 1594 | struct bvec_iter_all iter_all; |
ff6a9292 | 1595 | |
6dc4f100 | 1596 | bio_for_each_segment_all(bvec, bio, i, iter_all) |
ff6a9292 CH |
1597 | put_page(bvec->bv_page); |
1598 | bio_put(bio); | |
1599 | } | |
1600 | } | |
1601 | ||
81214bab | 1602 | static void |
ff6a9292 CH |
1603 | iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, |
1604 | unsigned len) | |
1605 | { | |
1606 | struct page *page = ZERO_PAGE(0); | |
d1e36282 | 1607 | int flags = REQ_SYNC | REQ_IDLE; |
ff6a9292 CH |
1608 | struct bio *bio; |
1609 | ||
1610 | bio = bio_alloc(GFP_KERNEL, 1); | |
74d46992 | 1611 | bio_set_dev(bio, iomap->bdev); |
57fc505d | 1612 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
ff6a9292 CH |
1613 | bio->bi_private = dio; |
1614 | bio->bi_end_io = iomap_dio_bio_end_io; | |
1615 | ||
1616 | get_page(page); | |
6533b4e4 | 1617 | __bio_add_page(bio, page, len, 0); |
d1e36282 | 1618 | bio_set_op_attrs(bio, REQ_OP_WRITE, flags); |
81214bab | 1619 | iomap_dio_submit_bio(dio, iomap, bio); |
ff6a9292 CH |
1620 | } |
1621 | ||
1622 | static loff_t | |
09230435 CH |
1623 | iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, |
1624 | struct iomap_dio *dio, struct iomap *iomap) | |
ff6a9292 | 1625 | { |
93407472 FF |
1626 | unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); |
1627 | unsigned int fs_block_size = i_blocksize(inode), pad; | |
1628 | unsigned int align = iov_iter_alignment(dio->submit.iter); | |
ff6a9292 CH |
1629 | struct iov_iter iter; |
1630 | struct bio *bio; | |
1631 | bool need_zeroout = false; | |
3460cac1 | 1632 | bool use_fua = false; |
4721a601 | 1633 | int nr_pages, ret = 0; |
cfe057f7 | 1634 | size_t copied = 0; |
ff6a9292 CH |
1635 | |
1636 | if ((pos | length | align) & ((1 << blkbits) - 1)) | |
1637 | return -EINVAL; | |
1638 | ||
09230435 | 1639 | if (iomap->type == IOMAP_UNWRITTEN) { |
ff6a9292 CH |
1640 | dio->flags |= IOMAP_DIO_UNWRITTEN; |
1641 | need_zeroout = true; | |
09230435 CH |
1642 | } |
1643 | ||
1644 | if (iomap->flags & IOMAP_F_SHARED) | |
1645 | dio->flags |= IOMAP_DIO_COW; | |
1646 | ||
1647 | if (iomap->flags & IOMAP_F_NEW) { | |
1648 | need_zeroout = true; | |
0929d858 | 1649 | } else if (iomap->type == IOMAP_MAPPED) { |
09230435 | 1650 | /* |
0929d858 DC |
1651 | * Use a FUA write if we need datasync semantics, this is a pure |
1652 | * data IO that doesn't require any metadata updates (including | |
1653 | * after IO completion such as unwritten extent conversion), and
1654 | * the underlying device supports FUA. This allows us to avoid | |
1655 | * cache flushes on IO completion. | |
09230435 CH |
1656 | */ |
1657 | if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && | |
1658 | (dio->flags & IOMAP_DIO_WRITE_FUA) && | |
1659 | blk_queue_fua(bdev_get_queue(iomap->bdev))) | |
1660 | use_fua = true; | |
ff6a9292 CH |
1661 | } |
1662 | ||
1663 | /* | |
1664 | * Operate on a partial iter trimmed to the extent we were called for. | |
1665 | * We'll update the iter in the dio once we're done with this extent. | |
1666 | */ | |
1667 | iter = *dio->submit.iter; | |
1668 | iov_iter_truncate(&iter, length); | |
1669 | ||
1670 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); | |
1671 | if (nr_pages <= 0) | |
1672 | return nr_pages; | |
1673 | ||
1674 | if (need_zeroout) { | |
1675 | /* zero out from the start of the block to the write offset */ | |
1676 | pad = pos & (fs_block_size - 1); | |
1677 | if (pad) | |
1678 | iomap_dio_zero(dio, iomap, pos - pad, pad); | |
1679 | } | |
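/*
 * Worked example of the head padding above (hypothetical numbers): with
 * fs_block_size == 4096 and pos == 6144, pad == 2048, so iomap_dio_zero()
 * submits a write of zeros covering file offsets [4096, 6144) before the
 * user data goes out, and the front of the block never exposes stale data.
 */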
1680 | ||
1681 | do { | |
cfe057f7 AV |
1682 | size_t n; |
1683 | if (dio->error) { | |
1684 | iov_iter_revert(dio->submit.iter, copied); | |
ff6a9292 | 1685 | return 0; |
cfe057f7 | 1686 | } |
ff6a9292 CH |
1687 | |
1688 | bio = bio_alloc(GFP_KERNEL, nr_pages); | |
74d46992 | 1689 | bio_set_dev(bio, iomap->bdev); |
57fc505d | 1690 | bio->bi_iter.bi_sector = iomap_sector(iomap, pos); |
45d06cf7 | 1691 | bio->bi_write_hint = dio->iocb->ki_hint; |
087e5669 | 1692 | bio->bi_ioprio = dio->iocb->ki_ioprio; |
ff6a9292 CH |
1693 | bio->bi_private = dio; |
1694 | bio->bi_end_io = iomap_dio_bio_end_io; | |
1695 | ||
1696 | ret = bio_iov_iter_get_pages(bio, &iter); | |
1697 | if (unlikely(ret)) { | |
4721a601 DC |
1698 | /* |
1699 | * We have to stop part way through an IO. We must fall | |
1700 | * through to the sub-block tail zeroing here, otherwise | |
1701 | * this short IO may expose stale data in the tail of | |
1702 | * the block we haven't written data to. | |
1703 | */ | |
ff6a9292 | 1704 | bio_put(bio); |
4721a601 | 1705 | goto zero_tail; |
ff6a9292 CH |
1706 | } |
1707 | ||
cfe057f7 | 1708 | n = bio->bi_iter.bi_size; |
ff6a9292 | 1709 | if (dio->flags & IOMAP_DIO_WRITE) { |
3460cac1 DC |
1710 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; |
1711 | if (use_fua) | |
1712 | bio->bi_opf |= REQ_FUA; | |
1713 | else | |
1714 | dio->flags &= ~IOMAP_DIO_WRITE_FUA; | |
cfe057f7 | 1715 | task_io_account_write(n); |
ff6a9292 | 1716 | } else { |
3460cac1 | 1717 | bio->bi_opf = REQ_OP_READ; |
ff6a9292 CH |
1718 | if (dio->flags & IOMAP_DIO_DIRTY) |
1719 | bio_set_pages_dirty(bio); | |
1720 | } | |
1721 | ||
cfe057f7 AV |
1722 | iov_iter_advance(dio->submit.iter, n); |
1723 | ||
1724 | dio->size += n; | |
1725 | pos += n; | |
1726 | copied += n; | |
ff6a9292 CH |
1727 | |
1728 | nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES); | |
81214bab | 1729 | iomap_dio_submit_bio(dio, iomap, bio); |
ff6a9292 CH |
1730 | } while (nr_pages); |
1731 | ||
b450672f DC |
1732 | /* |
1733 | * We need to zero out the tail of a sub-block write if the extent type
1734 | * requires zeroing or the write extends beyond EOF. If we don't zero | |
1735 | * the block tail in the latter case, we can expose stale data via mmap | |
1736 | * reads of the EOF block. | |
1737 | */ | |
4721a601 | 1738 | zero_tail: |
b450672f DC |
1739 | if (need_zeroout || |
1740 | ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { | |
ff6a9292 CH |
1741 | /* zero out from the end of the write to the end of the block */ |
1742 | pad = pos & (fs_block_size - 1); | |
1743 | if (pad) | |
1744 | iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); | |
1745 | } | |
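/*
 * Worked example of the tail padding above (hypothetical numbers): if the
 * write stops at pos == 10240 with fs_block_size == 4096, pad == 2048 and
 * iomap_dio_zero() covers file offsets [10240, 12288), so an mmap read of
 * the EOF block sees zeros instead of stale block contents.
 */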
4721a601 | 1746 | return copied ? copied : ret; |
ff6a9292 CH |
1747 | } |
1748 | ||
09230435 CH |
1749 | static loff_t |
1750 | iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio) | |
1751 | { | |
1752 | length = iov_iter_zero(length, dio->submit.iter); | |
1753 | dio->size += length; | |
1754 | return length; | |
1755 | } | |
1756 | ||
ec181f67 AG |
1757 | static loff_t |
1758 | iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length, | |
1759 | struct iomap_dio *dio, struct iomap *iomap) | |
1760 | { | |
1761 | struct iov_iter *iter = dio->submit.iter; | |
1762 | size_t copied; | |
1763 | ||
1764 | BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data)); | |
1765 | ||
1766 | if (dio->flags & IOMAP_DIO_WRITE) { | |
1767 | loff_t size = inode->i_size; | |
1768 | ||
1769 | if (pos > size) | |
1770 | memset(iomap->inline_data + size, 0, pos - size); | |
1771 | copied = copy_from_iter(iomap->inline_data + pos, length, iter); | |
1772 | if (copied) { | |
1773 | if (pos + copied > size) | |
1774 | i_size_write(inode, pos + copied); | |
1775 | mark_inode_dirty(inode); | |
1776 | } | |
1777 | } else { | |
1778 | copied = copy_to_iter(iomap->inline_data + pos, length, iter); | |
1779 | } | |
1780 | dio->size += copied; | |
1781 | return copied; | |
1782 | } | |
1783 | ||
09230435 CH |
1784 | static loff_t |
1785 | iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, | |
1786 | void *data, struct iomap *iomap) | |
1787 | { | |
1788 | struct iomap_dio *dio = data; | |
1789 | ||
1790 | switch (iomap->type) { | |
1791 | case IOMAP_HOLE: | |
1792 | if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE)) | |
1793 | return -EIO; | |
1794 | return iomap_dio_hole_actor(length, dio); | |
1795 | case IOMAP_UNWRITTEN: | |
1796 | if (!(dio->flags & IOMAP_DIO_WRITE)) | |
1797 | return iomap_dio_hole_actor(length, dio); | |
1798 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); | |
1799 | case IOMAP_MAPPED: | |
1800 | return iomap_dio_bio_actor(inode, pos, length, dio, iomap); | |
ec181f67 AG |
1801 | case IOMAP_INLINE: |
1802 | return iomap_dio_inline_actor(inode, pos, length, dio, iomap); | |
09230435 CH |
1803 | default: |
1804 | WARN_ON_ONCE(1); | |
1805 | return -EIO; | |
1806 | } | |
1807 | } | |
1808 | ||
4f8ff44b DC |
1809 | /* |
1810 | * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO | |
3460cac1 DC |
1811 | * is being issued as AIO or not. This allows us to optimise pure data writes |
1812 | * to use REQ_FUA rather than requiring generic_write_sync() to issue a | |
1813 | * REQ_FLUSH post write. This is slightly tricky because a single request here | |
1814 | * can be mapped into multiple disjoint IOs and only a subset of the IOs issued | |
1815 | * may be pure data writes. In that case, we still need to do a full data sync | |
1816 | * completion. | |
4f8ff44b | 1817 | */ |
ff6a9292 | 1818 | ssize_t |
8ff6daa1 CH |
1819 | iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, |
1820 | const struct iomap_ops *ops, iomap_dio_end_io_t end_io) | |
ff6a9292 CH |
1821 | { |
1822 | struct address_space *mapping = iocb->ki_filp->f_mapping; | |
1823 | struct inode *inode = file_inode(iocb->ki_filp); | |
1824 | size_t count = iov_iter_count(iter); | |
c771c14b EG |
1825 | loff_t pos = iocb->ki_pos, start = pos; |
1826 | loff_t end = iocb->ki_pos + count - 1, ret = 0; | |
ff6a9292 | 1827 | unsigned int flags = IOMAP_DIRECT; |
4ea899ea | 1828 | bool wait_for_completion = is_sync_kiocb(iocb); |
ff6a9292 CH |
1829 | struct blk_plug plug; |
1830 | struct iomap_dio *dio; | |
1831 | ||
1832 | lockdep_assert_held(&inode->i_rwsem); | |
1833 | ||
1834 | if (!count) | |
1835 | return 0; | |
1836 | ||
1837 | dio = kmalloc(sizeof(*dio), GFP_KERNEL); | |
1838 | if (!dio) | |
1839 | return -ENOMEM; | |
1840 | ||
1841 | dio->iocb = iocb; | |
1842 | atomic_set(&dio->ref, 1); | |
1843 | dio->size = 0; | |
1844 | dio->i_size = i_size_read(inode); | |
1845 | dio->end_io = end_io; | |
1846 | dio->error = 0; | |
1847 | dio->flags = 0; | |
1848 | ||
1849 | dio->submit.iter = iter; | |
ebf00be3 AG |
1850 | dio->submit.waiter = current; |
1851 | dio->submit.cookie = BLK_QC_T_NONE; | |
1852 | dio->submit.last_queue = NULL; | |
ff6a9292 CH |
1853 | |
1854 | if (iov_iter_rw(iter) == READ) { | |
1855 | if (pos >= dio->i_size) | |
1856 | goto out_free_dio; | |
1857 | ||
00e23707 | 1858 | if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ) |
ff6a9292 CH |
1859 | dio->flags |= IOMAP_DIO_DIRTY; |
1860 | } else { | |
3460cac1 | 1861 | flags |= IOMAP_WRITE; |
ff6a9292 | 1862 | dio->flags |= IOMAP_DIO_WRITE; |
3460cac1 DC |
1863 | |
1864 | /* for data sync or sync, we need sync completion processing */ | |
4f8ff44b DC |
1865 | if (iocb->ki_flags & IOCB_DSYNC) |
1866 | dio->flags |= IOMAP_DIO_NEED_SYNC; | |
3460cac1 DC |
1867 | |
1868 | /* | |
1869 | * For datasync only writes, we optimistically try using FUA for | |
1870 | * this IO. Any non-FUA write that occurs will clear this flag, | |
1871 | * hence we know before completion whether a cache flush is | |
1872 | * necessary. | |
1873 | */ | |
1874 | if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC) | |
1875 | dio->flags |= IOMAP_DIO_WRITE_FUA; | |
ff6a9292 CH |
1876 | } |
1877 | ||
a38d1243 GR |
1878 | if (iocb->ki_flags & IOCB_NOWAIT) { |
1879 | if (filemap_range_has_page(mapping, start, end)) { | |
1880 | ret = -EAGAIN; | |
1881 | goto out_free_dio; | |
1882 | } | |
1883 | flags |= IOMAP_NOWAIT; | |
1884 | } | |
1885 | ||
55635ba7 AR |
1886 | ret = filemap_write_and_wait_range(mapping, start, end); |
1887 | if (ret) | |
1888 | goto out_free_dio; | |
ff6a9292 | 1889 | |
5a9d929d DW |
1890 | /* |
1891 | * Try to invalidate cache pages for the range we're direct | |
1892 | * writing. If this invalidation fails, tough, the write will | |
1893 | * still work, but racing two incompatible write paths is a | |
1894 | * pretty crazy thing to do, so we don't support it 100%. | |
1895 | */ | |
55635ba7 AR |
1896 | ret = invalidate_inode_pages2_range(mapping, |
1897 | start >> PAGE_SHIFT, end >> PAGE_SHIFT); | |
5a9d929d DW |
1898 | if (ret) |
1899 | dio_warn_stale_pagecache(iocb->ki_filp); | |
55635ba7 | 1900 | ret = 0; |
ff6a9292 | 1901 | |
4ea899ea | 1902 | if (iov_iter_rw(iter) == WRITE && !wait_for_completion && |
546e7be8 CR |
1903 | !inode->i_sb->s_dio_done_wq) { |
1904 | ret = sb_init_dio_done_wq(inode->i_sb); | |
1905 | if (ret < 0) | |
1906 | goto out_free_dio; | |
1907 | } | |
1908 | ||
ff6a9292 CH |
1909 | inode_dio_begin(inode); |
1910 | ||
1911 | blk_start_plug(&plug); | |
1912 | do { | |
1913 | ret = iomap_apply(inode, pos, count, flags, ops, dio, | |
1914 | iomap_dio_actor); | |
1915 | if (ret <= 0) { | |
1916 | /* magic error code to fall back to buffered I/O */ | |
ebf00be3 | 1917 | if (ret == -ENOTBLK) { |
4ea899ea | 1918 | wait_for_completion = true; |
ff6a9292 | 1919 | ret = 0; |
ebf00be3 | 1920 | } |
ff6a9292 CH |
1921 | break; |
1922 | } | |
1923 | pos += ret; | |
a008c31c CR |
1924 | |
1925 | if (iov_iter_rw(iter) == READ && pos >= dio->i_size) | |
1926 | break; | |
ff6a9292 CH |
1927 | } while ((count = iov_iter_count(iter)) > 0); |
1928 | blk_finish_plug(&plug); | |
1929 | ||
1930 | if (ret < 0) | |
1931 | iomap_dio_set_error(dio, ret); | |
1932 | ||
3460cac1 DC |
1933 | /* |
1934 | * If all the writes we issued were FUA, we don't need to flush the | |
1935 | * cache on IO completion. Clear the sync flag for this case. | |
1936 | */ | |
1937 | if (dio->flags & IOMAP_DIO_WRITE_FUA) | |
1938 | dio->flags &= ~IOMAP_DIO_NEED_SYNC; | |
1939 | ||
81214bab CH |
1940 | WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie); |
1941 | WRITE_ONCE(iocb->private, dio->submit.last_queue); | |
1942 | ||
4ea899ea CH |
1943 | /* |
1944 | * We are about to drop our additional submission reference, which | |
1945 | * might be the last reference to the dio. There are three
1946 | * different ways we can progress here: | |
1947 | * | |
1948 | * (a) If this is the last reference we will always complete and free | |
1949 | * the dio ourselves. | |
1950 | * (b) If this is not the last reference, and we serve an asynchronous | |
1951 | * iocb, we must never touch the dio after the decrement; the
1952 | * I/O completion handler will complete and free it. | |
1953 | * (c) If this is not the last reference, but we serve a synchronous | |
1954 | * iocb, the I/O completion handler will wake us up on the drop | |
1955 | * of the final reference, and we will complete and free it here | |
1956 | * after we got woken by the I/O completion handler. | |
1957 | */ | |
1958 | dio->wait_for_completion = wait_for_completion; | |
ff6a9292 | 1959 | if (!atomic_dec_and_test(&dio->ref)) { |
4ea899ea | 1960 | if (!wait_for_completion) |
ff6a9292 CH |
1961 | return -EIOCBQUEUED; |
1962 | ||
1963 | for (;;) { | |
1ac5cd49 | 1964 | set_current_state(TASK_UNINTERRUPTIBLE); |
ff6a9292 CH |
1965 | if (!READ_ONCE(dio->submit.waiter)) |
1966 | break; | |
1967 | ||
1968 | if (!(iocb->ki_flags & IOCB_HIPRI) || | |
1969 | !dio->submit.last_queue || | |
ea435e1b | 1970 | !blk_poll(dio->submit.last_queue, |
0a1b8b87 | 1971 | dio->submit.cookie, true)) |
ff6a9292 CH |
1972 | io_schedule(); |
1973 | } | |
1974 | __set_current_state(TASK_RUNNING); | |
1975 | } | |
1976 | ||
4ea899ea | 1977 | return iomap_dio_complete(dio); |
ff6a9292 CH |
1978 | |
1979 | out_free_dio: | |
1980 | kfree(dio); | |
1981 | return ret; | |
1982 | } | |
1983 | EXPORT_SYMBOL_GPL(iomap_dio_rw); | |
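/*
 * Sketch of a typical caller (illustrative only; myfs_iomap_ops and the
 * locking discipline shown are assumptions, not part of this file).  The
 * caller must hold i_rwsem, as asserted by the lockdep check above:
 *
 *	static ssize_t
 *	myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!inode_trylock_shared(inode)) {
 *			if (iocb->ki_flags & IOCB_NOWAIT)
 *				return -EAGAIN;
 *			inode_lock_shared(inode);
 *		}
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */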
67482129 DW |
1984 | |
1985 | /* Swapfile activation */ | |
1986 | ||
1987 | #ifdef CONFIG_SWAP | |
1988 | struct iomap_swapfile_info { | |
1989 | struct iomap iomap; /* accumulated iomap */ | |
1990 | struct swap_info_struct *sis; | |
1991 | uint64_t lowest_ppage; /* lowest physical addr seen (pages) */ | |
1992 | uint64_t highest_ppage; /* highest physical addr seen (pages) */ | |
1993 | unsigned long nr_pages; /* number of pages collected */ | |
1994 | int nr_extents; /* extent count */ | |
1995 | }; | |
1996 | ||
1997 | /* | |
1998 | * Collect physical extents for this swap file. Physical extents reported to | |
1999 | * the swap code must be trimmed to align to a page boundary. The logical | |
2000 | * offset within the file is irrelevant since the swapfile code maps logical | |
2001 | * page numbers of the swap device to the physical page-aligned extents. | |
2002 | */ | |
2003 | static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi) | |
2004 | { | |
2005 | struct iomap *iomap = &isi->iomap; | |
2006 | unsigned long nr_pages; | |
2007 | uint64_t first_ppage; | |
2008 | uint64_t first_ppage_reported; | |
2009 | uint64_t next_ppage; | |
2010 | int error; | |
2011 | ||
2012 | /* | |
2013 | * Round the start up and the end down so that the physical | |
2014 | * extent aligns to a page boundary. | |
2015 | */ | |
2016 | first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT; | |
2017 | next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >> | |
2018 | PAGE_SHIFT; | |
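/*
 * Worked example (hypothetical numbers): with PAGE_SIZE == 4096,
 * iomap->addr == 0x11200 and iomap->length == 0x3000, the extent spans
 * bytes [0x11200, 0x14200); rounding the start up gives first_ppage ==
 * 0x12 and rounding the end down gives next_ppage == 0x14, so only the
 * two fully page-aligned pages are handed to the swap code.
 */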
2019 | ||
2020 | /* Skip too-short physical extents. */ | |
2021 | if (first_ppage >= next_ppage) | |
2022 | return 0; | |
2023 | nr_pages = next_ppage - first_ppage; | |
2024 | ||
2025 | /* | |
2026 | * Calculate how much swap space we're adding; the first page contains | |
2027 | * the swap header and doesn't count. The mm still wants that first | |
2028 | * page fed to add_swap_extent, however. | |
2029 | */ | |
2030 | first_ppage_reported = first_ppage; | |
2031 | if (iomap->offset == 0) | |
2032 | first_ppage_reported++; | |
2033 | if (isi->lowest_ppage > first_ppage_reported) | |
2034 | isi->lowest_ppage = first_ppage_reported; | |
2035 | if (isi->highest_ppage < (next_ppage - 1)) | |
2036 | isi->highest_ppage = next_ppage - 1; | |
2037 | ||
2038 | /* Add extent, set up for the next call. */ | |
2039 | error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage); | |
2040 | if (error < 0) | |
2041 | return error; | |
2042 | isi->nr_extents += error; | |
2043 | isi->nr_pages += nr_pages; | |
2044 | return 0; | |
2045 | } | |
2046 | ||
2047 | /* | |
2048 | * Accumulate iomaps for this swap file. We have to accumulate iomaps because | |
2049 | * swap only cares about contiguous page-aligned physical extents and makes no | |
2050 | * distinction between written and unwritten extents. | |
2051 | */ | |
2052 | static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos, | |
2053 | loff_t count, void *data, struct iomap *iomap) | |
2054 | { | |
2055 | struct iomap_swapfile_info *isi = data; | |
2056 | int error; | |
2057 | ||
19319b53 CH |
2058 | switch (iomap->type) { |
2059 | case IOMAP_MAPPED: | |
2060 | case IOMAP_UNWRITTEN: | |
2061 | /* Only real or unwritten extents. */ | |
2062 | break; | |
2063 | case IOMAP_INLINE: | |
2064 | /* No inline data. */ | |
ec601924 OS |
2065 | pr_err("swapon: file is inline\n"); |
2066 | return -EINVAL; | |
19319b53 | 2067 | default: |
ec601924 OS |
2068 | pr_err("swapon: file has unallocated extents\n"); |
2069 | return -EINVAL; | |
2070 | } | |
67482129 | 2071 | |
ec601924 OS |
2072 | /* No uncommitted metadata or shared blocks. */ |
2073 | if (iomap->flags & IOMAP_F_DIRTY) { | |
2074 | pr_err("swapon: file is not committed\n"); | |
2075 | return -EINVAL; | |
2076 | } | |
2077 | if (iomap->flags & IOMAP_F_SHARED) { | |
2078 | pr_err("swapon: file has shared extents\n"); | |
2079 | return -EINVAL; | |
2080 | } | |
67482129 | 2081 | |
ec601924 OS |
2082 | /* Only one bdev per swap file. */ |
2083 | if (iomap->bdev != isi->sis->bdev) { | |
2084 | pr_err("swapon: file is on multiple devices\n"); | |
2085 | return -EINVAL; | |
2086 | } | |
67482129 DW |
2087 | |
2088 | if (isi->iomap.length == 0) { | |
2089 | /* No accumulated extent, so just store it. */ | |
2090 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); | |
2091 | } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) { | |
2092 | /* Append this to the accumulated extent. */ | |
2093 | isi->iomap.length += iomap->length; | |
2094 | } else { | |
2095 | /* Otherwise, add the retained iomap and store this one. */ | |
2096 | error = iomap_swapfile_add_extent(isi); | |
2097 | if (error) | |
2098 | return error; | |
2099 | memcpy(&isi->iomap, iomap, sizeof(isi->iomap)); | |
2100 | } | |
67482129 | 2101 | return count; |
67482129 DW |
2102 | } |
2103 | ||
2104 | /* | |
2105 | * Iterate a swap file's iomaps to construct physical extents that can be | |
2106 | * passed to the swapfile subsystem. | |
2107 | */ | |
2108 | int iomap_swapfile_activate(struct swap_info_struct *sis, | |
2109 | struct file *swap_file, sector_t *pagespan, | |
2110 | const struct iomap_ops *ops) | |
2111 | { | |
2112 | struct iomap_swapfile_info isi = { | |
2113 | .sis = sis, | |
2114 | .lowest_ppage = (sector_t)-1ULL, | |
2115 | }; | |
2116 | struct address_space *mapping = swap_file->f_mapping; | |
2117 | struct inode *inode = mapping->host; | |
2118 | loff_t pos = 0; | |
2119 | loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE); | |
2120 | loff_t ret; | |
2121 | ||
117a148f DW |
2122 | /* |
2123 | * Persist all file mapping metadata so that we won't have any | |
2124 | * IOMAP_F_DIRTY iomaps. | |
2125 | */ | |
2126 | ret = vfs_fsync(swap_file, 1); | |
67482129 DW |
2127 | if (ret) |
2128 | return ret; | |
2129 | ||
2130 | while (len > 0) { | |
2131 | ret = iomap_apply(inode, pos, len, IOMAP_REPORT, | |
2132 | ops, &isi, iomap_swapfile_activate_actor); | |
2133 | if (ret <= 0) | |
2134 | return ret; | |
2135 | ||
2136 | pos += ret; | |
2137 | len -= ret; | |
2138 | } | |
2139 | ||
2140 | if (isi.iomap.length) { | |
2141 | ret = iomap_swapfile_add_extent(&isi); | |
2142 | if (ret) | |
2143 | return ret; | |
2144 | } | |
2145 | ||
2146 | *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; | |
2147 | sis->max = isi.nr_pages; | |
2148 | sis->pages = isi.nr_pages - 1; | |
2149 | sis->highest_bit = isi.nr_pages - 1; | |
2150 | return isi.nr_extents; | |
2151 | } | |
2152 | EXPORT_SYMBOL_GPL(iomap_swapfile_activate); | |
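/*
 * Sketch of how a filesystem wires this up from its ->swap_activate
 * address_space operation (illustrative; myfs_iomap_ops is an assumed
 * name, not defined here):
 *
 *	static int myfs_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&myfs_iomap_ops);
 *	}
 */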
2153 | #endif /* CONFIG_SWAP */ | |
89eb1906 CH |
2154 | |
2155 | static loff_t | |
2156 | iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length, | |
2157 | void *data, struct iomap *iomap) | |
2158 | { | |
2159 | sector_t *bno = data, addr; | |
2160 | ||
2161 | if (iomap->type == IOMAP_MAPPED) { | |
2162 | addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits; | |
2163 | if (addr > INT_MAX) | |
2164 | WARN(1, "would truncate bmap result\n"); | |
2165 | else | |
2166 | *bno = addr; | |
2167 | } | |
2168 | return 0; | |
2169 | } | |
2170 | ||
2171 | /* legacy ->bmap interface. 0 is the error return (!) */ | |
2172 | sector_t | |
2173 | iomap_bmap(struct address_space *mapping, sector_t bno, | |
2174 | const struct iomap_ops *ops) | |
2175 | { | |
2176 | struct inode *inode = mapping->host; | |
79b3dbe4 | 2177 | loff_t pos = bno << inode->i_blkbits; |
89eb1906 CH |
2178 | unsigned blocksize = i_blocksize(inode); |
2179 | ||
2180 | if (filemap_write_and_wait(mapping)) | |
2181 | return 0; | |
2182 | ||
2183 | bno = 0; | |
2184 | iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor); | |
2185 | return bno; | |
2186 | } | |
2187 | EXPORT_SYMBOL_GPL(iomap_bmap); |
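/*
 * Sketch of a filesystem's ->bmap address_space operation built on the
 * helper above (illustrative; myfs_iomap_ops is an assumed name):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &myfs_iomap_ops);
 *	}
 */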