fs/iomap.c
iomap: add an iomap-based readpage and readpages implementation
ae259a9c
CH
1/*
2 * Copyright (C) 2010 Red Hat, Inc.
72b4daa2 3 * Copyright (c) 2016-2018 Christoph Hellwig.
ae259a9c
CH
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/compiler.h>
16#include <linux/fs.h>
17#include <linux/iomap.h>
18#include <linux/uaccess.h>
19#include <linux/gfp.h>
20#include <linux/mm.h>
72b4daa2 21#include <linux/mm_inline.h>
ae259a9c
CH
22#include <linux/swap.h>
23#include <linux/pagemap.h>
8a78cb1f 24#include <linux/pagevec.h>
ae259a9c
CH
25#include <linux/file.h>
26#include <linux/uio.h>
27#include <linux/backing-dev.h>
28#include <linux/buffer_head.h>
ff6a9292 29#include <linux/task_io_accounting_ops.h>
9a286f0e 30#include <linux/dax.h>
f361bf4a 31#include <linux/sched/signal.h>
67482129 32#include <linux/swap.h>
f361bf4a 33
ae259a9c
CH
34#include "internal.h"
35
ae259a9c
CH
36/*
37 * Execute an iomap write on a segment of the mapping that spans a
38 * contiguous range of pages that have identical block mapping state.
39 *
40 * This avoids the need to map pages individually, do individual allocations
41 * for each page and, most importantly, avoids the need for filesystem-specific
42 * locking per page. Instead, all the operations are amortised over the entire
43 * range of pages. It is assumed that the filesystems will lock whatever
44 * resources they require in the iomap_begin call, and release them in the
45 * iomap_end call.
46 */
befb503c 47loff_t
ae259a9c 48iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
8ff6daa1 49 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
ae259a9c
CH
50{
51 struct iomap iomap = { 0 };
52 loff_t written = 0, ret;
53
54 /*
55 * Need to map a range from start position for length bytes. This can
56 * span multiple pages - it is only guaranteed to return a range of a
57 * single type of pages (e.g. all into a hole, all mapped or all
58 * unwritten). Failure at this point has nothing to undo.
59 *
60 * If allocation is required for this range, reserve the space now so
61 * that the allocation is guaranteed to succeed later on. Once we copy
62 * the data into the page cache pages, we cannot fail, otherwise we would
63 * expose transient stale data. If the reserve fails, we can safely
64 * back out at this point as there is nothing to undo.
65 */
66 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
67 if (ret)
68 return ret;
69 if (WARN_ON(iomap.offset > pos))
70 return -EIO;
0c6dda7a
DW
71 if (WARN_ON(iomap.length == 0))
72 return -EIO;
ae259a9c
CH
73
74 /*
75 * Cut down the length to the one actually provided by the filesystem,
76 * as it might not be able to give us the whole size that we requested.
77 */
78 if (iomap.offset + iomap.length < pos + length)
79 length = iomap.offset + iomap.length - pos;
80
81 /*
82 * Now that we have guaranteed that the space allocation will succeed,
83 * we can do the copy-in page by page without having to worry about
84 * failures exposing transient data.
85 */
86 written = actor(inode, pos, length, data, &iomap);
87
88 /*
89 * Now the data has been copied, commit the range we've copied. This
90 * should not fail unless the filesystem has had a fatal error.
91 */
f20ac7ab
CH
92 if (ops->iomap_end) {
93 ret = ops->iomap_end(inode, pos, length,
94 written > 0 ? written : 0,
95 flags, &iomap);
96 }
ae259a9c
CH
97
98 return written ? written : ret;
99}
100
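/*
 * Purely illustrative sketch, not part of this file: a filesystem's
 * ->iomap_begin callback might fill out the iomap for the requested range
 * roughly as below.  struct foo_extent, foo_get_extent() and foo_iomap_ops
 * are hypothetical names; the extent lookup and any locking are entirely
 * filesystem specific.
 */
struct foo_extent {		/* hypothetical extent descriptor */
	loff_t	file_offset;
	loff_t	length;
	u64	disk_offset;	/* byte address within the block device */
	bool	is_hole;
	bool	is_unwritten;
};

/* hypothetical per-filesystem extent lookup, done under the fs's own locks */
int foo_get_extent(struct inode *inode, loff_t offset, loff_t length,
		bool for_write, struct foo_extent *ext);

static int foo_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	struct foo_extent ext;
	int error;

	error = foo_get_extent(inode, offset, length, flags & IOMAP_WRITE,
			&ext);
	if (error)
		return error;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = ext.file_offset;
	iomap->length = ext.length;
	if (ext.is_hole) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
	} else {
		iomap->type = ext.is_unwritten ? IOMAP_UNWRITTEN : IOMAP_MAPPED;
		iomap->addr = ext.disk_offset;
	}
	return 0;
}

static const struct iomap_ops foo_iomap_ops = {
	.iomap_begin	= foo_iomap_begin,
};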
57fc505d
CH
101static sector_t
102iomap_sector(struct iomap *iomap, loff_t pos)
103{
104 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
105}
106
19e0c58f
AG
107static void
108iomap_read_inline_data(struct inode *inode, struct page *page,
109 struct iomap *iomap)
110{
111 size_t size = i_size_read(inode);
112 void *addr;
113
114 if (PageUptodate(page))
115 return;
116
117 BUG_ON(page->index);
118 BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
119
120 addr = kmap_atomic(page);
121 memcpy(addr, iomap->inline_data, size);
122 memset(addr + size, 0, PAGE_SIZE - size);
123 kunmap_atomic(addr);
124 SetPageUptodate(page);
125}
126
ae259a9c 127static void
72b4daa2
CH
128iomap_read_end_io(struct bio *bio)
129{
130 int error = blk_status_to_errno(bio->bi_status);
131 struct bio_vec *bvec;
132 int i;
133
134 bio_for_each_segment_all(bvec, bio, i)
135 page_endio(bvec->bv_page, false, error);
136 bio_put(bio);
137}
138
139struct iomap_readpage_ctx {
140 struct page *cur_page;
141 bool cur_page_in_bio;
142 bool is_readahead;
143 struct bio *bio;
144 struct list_head *pages;
145};
146
147static loff_t
148iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
149 struct iomap *iomap)
150{
151 struct iomap_readpage_ctx *ctx = data;
152 struct page *page = ctx->cur_page;
153 unsigned poff = pos & (PAGE_SIZE - 1);
154 unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
155 bool is_contig = false;
156 sector_t sector;
157
158 /* we don't support blocksize < PAGE_SIZE quite yet. */
159 WARN_ON_ONCE(pos != page_offset(page));
160 WARN_ON_ONCE(plen != PAGE_SIZE);
161
162 if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
163 zero_user(page, poff, plen);
164 SetPageUptodate(page);
165 goto done;
166 }
167
168 ctx->cur_page_in_bio = true;
169
170 /*
171 * Try to merge into a previous segment if we can.
172 */
173 sector = iomap_sector(iomap, pos);
174 if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
175 if (__bio_try_merge_page(ctx->bio, page, plen, poff))
176 goto done;
177 is_contig = true;
178 }
179
180 if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
181 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
182 int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
183
184 if (ctx->bio)
185 submit_bio(ctx->bio);
186
187 if (ctx->is_readahead) /* same as readahead_gfp_mask */
188 gfp |= __GFP_NORETRY | __GFP_NOWARN;
189 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
190 ctx->bio->bi_opf = REQ_OP_READ;
191 if (ctx->is_readahead)
192 ctx->bio->bi_opf |= REQ_RAHEAD;
193 ctx->bio->bi_iter.bi_sector = sector;
194 bio_set_dev(ctx->bio, iomap->bdev);
195 ctx->bio->bi_end_io = iomap_read_end_io;
196 }
197
198 __bio_add_page(ctx->bio, page, plen, poff);
199done:
200 return plen;
201}
202
203int
204iomap_readpage(struct page *page, const struct iomap_ops *ops)
205{
206 struct iomap_readpage_ctx ctx = { .cur_page = page };
207 struct inode *inode = page->mapping->host;
208 unsigned poff;
209 loff_t ret;
210
211 WARN_ON_ONCE(page_has_buffers(page));
212
213 for (poff = 0; poff < PAGE_SIZE; poff += ret) {
214 ret = iomap_apply(inode, page_offset(page) + poff,
215 PAGE_SIZE - poff, 0, ops, &ctx,
216 iomap_readpage_actor);
217 if (ret <= 0) {
218 WARN_ON_ONCE(ret == 0);
219 SetPageError(page);
220 break;
221 }
222 }
223
224 if (ctx.bio) {
225 submit_bio(ctx.bio);
226 WARN_ON_ONCE(!ctx.cur_page_in_bio);
227 } else {
228 WARN_ON_ONCE(ctx.cur_page_in_bio);
229 unlock_page(page);
230 }
231
232 /*
233 * Just like mpage_readpages and block_read_full_page we always
234 * return 0 and just mark the page as PageError on errors. This
235 * should be cleaned up all through the stack eventually.
236 */
237 return 0;
238}
239EXPORT_SYMBOL_GPL(iomap_readpage);
240
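/*
 * Usage sketch, not from this file: a filesystem hooks iomap_readpage()
 * up through its address_space_operations ->readpage method, passing its
 * own iomap ops.  foo_readpage() and foo_iomap_ops (from the earlier
 * sketch) are assumed names.
 */
static int foo_readpage(struct file *unused, struct page *page)
{
	return iomap_readpage(page, &foo_iomap_ops);
}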
241static struct page *
242iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
243 loff_t length, loff_t *done)
244{
245 while (!list_empty(pages)) {
246 struct page *page = lru_to_page(pages);
247
248 if (page_offset(page) >= (u64)pos + length)
249 break;
250
251 list_del(&page->lru);
252 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
253 GFP_NOFS))
254 return page;
255
256 /*
257 * If we already have a page in the page cache at this index, we are
258 * done. Upper layers don't care whether it is uptodate after the
259 * readpages call itself, as every page gets checked again once
260 * actually needed.
261 */
262 *done += PAGE_SIZE;
263 put_page(page);
264 }
265
266 return NULL;
267}
268
269static loff_t
270iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
271 void *data, struct iomap *iomap)
272{
273 struct iomap_readpage_ctx *ctx = data;
274 loff_t done, ret;
275
276 for (done = 0; done < length; done += ret) {
277 if (ctx->cur_page && ((pos + done) & (PAGE_SIZE - 1)) == 0) {
278 if (!ctx->cur_page_in_bio)
279 unlock_page(ctx->cur_page);
280 put_page(ctx->cur_page);
281 ctx->cur_page = NULL;
282 }
283 if (!ctx->cur_page) {
284 ctx->cur_page = iomap_next_page(inode, ctx->pages,
285 pos, length, &done);
286 if (!ctx->cur_page)
287 break;
288 ctx->cur_page_in_bio = false;
289 }
290 ret = iomap_readpage_actor(inode, pos + done, length - done,
291 ctx, iomap);
292 }
293
294 return done;
295}
296
297int
298iomap_readpages(struct address_space *mapping, struct list_head *pages,
299 unsigned nr_pages, const struct iomap_ops *ops)
300{
301 struct iomap_readpage_ctx ctx = {
302 .pages = pages,
303 .is_readahead = true,
304 };
305 loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
306 loff_t last = page_offset(list_entry(pages->next, struct page, lru));
307 loff_t length = last - pos + PAGE_SIZE, ret = 0;
308
309 while (length > 0) {
310 ret = iomap_apply(mapping->host, pos, length, 0, ops,
311 &ctx, iomap_readpages_actor);
312 if (ret <= 0) {
313 WARN_ON_ONCE(ret == 0);
314 goto done;
315 }
316 pos += ret;
317 length -= ret;
318 }
319 ret = 0;
320done:
321 if (ctx.bio)
322 submit_bio(ctx.bio);
323 if (ctx.cur_page) {
324 if (!ctx.cur_page_in_bio)
325 unlock_page(ctx.cur_page);
326 put_page(ctx.cur_page);
327 }
328
329 /*
330 * Check that we didn't lose a page due to the arcane calling
331 * conventions.
332 */
333 WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
334 return ret;
335}
336EXPORT_SYMBOL_GPL(iomap_readpages);
337
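/*
 * Matching sketch for the ->readpages address_space operation;
 * foo_readpages() and foo_iomap_ops are hypothetical names.
 */
static int foo_readpages(struct file *unused, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return iomap_readpages(mapping, pages, nr_pages, &foo_iomap_ops);
}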
338static void
ae259a9c
CH
339iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
340{
341 loff_t i_size = i_size_read(inode);
342
343 /*
344 * Only truncate newly allocated pages beyond EOF, even if the
345 * write started inside the existing inode size.
346 */
347 if (pos + len > i_size)
348 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
349}
350
351static int
352iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
353 struct page **pagep, struct iomap *iomap)
354{
355 pgoff_t index = pos >> PAGE_SHIFT;
356 struct page *page;
357 int status = 0;
358
359 BUG_ON(pos + len > iomap->offset + iomap->length);
360
d1908f52
MH
361 if (fatal_signal_pending(current))
362 return -EINTR;
363
ae259a9c
CH
364 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
365 if (!page)
366 return -ENOMEM;
367
19e0c58f
AG
368 if (iomap->type == IOMAP_INLINE)
369 iomap_read_inline_data(inode, page, iomap);
370 else
371 status = __block_write_begin_int(page, pos, len, NULL, iomap);
372
ae259a9c
CH
373 if (unlikely(status)) {
374 unlock_page(page);
375 put_page(page);
376 page = NULL;
377
378 iomap_write_failed(inode, pos, len);
379 }
380
381 *pagep = page;
382 return status;
383}
384
19e0c58f
AG
385static int
386iomap_write_end_inline(struct inode *inode, struct page *page,
387 struct iomap *iomap, loff_t pos, unsigned copied)
388{
389 void *addr;
390
391 WARN_ON_ONCE(!PageUptodate(page));
392 BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
393
394 addr = kmap_atomic(page);
395 memcpy(iomap->inline_data + pos, addr + pos, copied);
396 kunmap_atomic(addr);
397
398 mark_inode_dirty(inode);
399 __generic_write_end(inode, pos, copied, page);
400 return copied;
401}
402
ae259a9c
CH
403static int
404iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
19e0c58f 405 unsigned copied, struct page *page, struct iomap *iomap)
ae259a9c
CH
406{
407 int ret;
408
19e0c58f
AG
409 if (iomap->type == IOMAP_INLINE) {
410 ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
411 } else {
412 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
413 copied, page, NULL);
414 }
415
63899c6f
CH
416 if (iomap->page_done)
417 iomap->page_done(inode, pos, copied, page, iomap);
418
ae259a9c
CH
419 if (ret < len)
420 iomap_write_failed(inode, pos, len);
421 return ret;
422}
423
424static loff_t
425iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
426 struct iomap *iomap)
427{
428 struct iov_iter *i = data;
429 long status = 0;
430 ssize_t written = 0;
431 unsigned int flags = AOP_FLAG_NOFS;
432
ae259a9c
CH
433 do {
434 struct page *page;
435 unsigned long offset; /* Offset into pagecache page */
436 unsigned long bytes; /* Bytes to write to page */
437 size_t copied; /* Bytes copied from user */
438
439 offset = (pos & (PAGE_SIZE - 1));
440 bytes = min_t(unsigned long, PAGE_SIZE - offset,
441 iov_iter_count(i));
442again:
443 if (bytes > length)
444 bytes = length;
445
446 /*
447 * Bring in the user page that we will copy from _first_.
448 * Otherwise there's a nasty deadlock on copying from the
449 * same page as we're writing to, without it being marked
450 * up-to-date.
451 *
452 * Not only is this an optimisation, but it is also required
453 * to check that the address is actually valid, when atomic
454 * usercopies are used, below.
455 */
456 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
457 status = -EFAULT;
458 break;
459 }
460
461 status = iomap_write_begin(inode, pos, bytes, flags, &page,
462 iomap);
463 if (unlikely(status))
464 break;
465
466 if (mapping_writably_mapped(inode->i_mapping))
467 flush_dcache_page(page);
468
ae259a9c 469 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
ae259a9c
CH
470
471 flush_dcache_page(page);
ae259a9c 472
19e0c58f
AG
473 status = iomap_write_end(inode, pos, bytes, copied, page,
474 iomap);
ae259a9c
CH
475 if (unlikely(status < 0))
476 break;
477 copied = status;
478
479 cond_resched();
480
481 iov_iter_advance(i, copied);
482 if (unlikely(copied == 0)) {
483 /*
484 * If we were unable to copy any data at all, we must
485 * fall back to a single segment length write.
486 *
487 * If we didn't fallback here, we could livelock
488 * because not all segments in the iov can be copied at
489 * once without a pagefault.
490 */
491 bytes = min_t(unsigned long, PAGE_SIZE - offset,
492 iov_iter_single_seg_count(i));
493 goto again;
494 }
495 pos += copied;
496 written += copied;
497 length -= copied;
498
499 balance_dirty_pages_ratelimited(inode->i_mapping);
500 } while (iov_iter_count(i) && length);
501
502 return written ? written : status;
503}
504
505ssize_t
506iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
8ff6daa1 507 const struct iomap_ops *ops)
ae259a9c
CH
508{
509 struct inode *inode = iocb->ki_filp->f_mapping->host;
510 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
511
512 while (iov_iter_count(iter)) {
513 ret = iomap_apply(inode, pos, iov_iter_count(iter),
514 IOMAP_WRITE, ops, iter, iomap_write_actor);
515 if (ret <= 0)
516 break;
517 pos += ret;
518 written += ret;
519 }
520
521 return written ? written : ret;
522}
523EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
524
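/*
 * Sketch of how a ->write_iter implementation might drive
 * iomap_file_buffered_write().  foo_file_write_iter() and foo_iomap_ops
 * are assumed names, and a real filesystem adds its own locking,
 * permission and space accounting around this.
 */
static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &foo_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}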
5f4e5752
CH
525static struct page *
526__iomap_read_page(struct inode *inode, loff_t offset)
527{
528 struct address_space *mapping = inode->i_mapping;
529 struct page *page;
530
531 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
532 if (IS_ERR(page))
533 return page;
534 if (!PageUptodate(page)) {
535 put_page(page);
536 return ERR_PTR(-EIO);
537 }
538 return page;
539}
540
541static loff_t
542iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
543 struct iomap *iomap)
544{
545 long status = 0;
546 ssize_t written = 0;
547
548 do {
549 struct page *page, *rpage;
550 unsigned long offset; /* Offset into pagecache page */
551 unsigned long bytes; /* Bytes to write to page */
552
553 offset = (pos & (PAGE_SIZE - 1));
e28ae8e4 554 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
5f4e5752
CH
555
556 rpage = __iomap_read_page(inode, pos);
557 if (IS_ERR(rpage))
558 return PTR_ERR(rpage);
559
560 status = iomap_write_begin(inode, pos, bytes,
c718a975 561 AOP_FLAG_NOFS, &page, iomap);
5f4e5752
CH
562 put_page(rpage);
563 if (unlikely(status))
564 return status;
565
566 WARN_ON_ONCE(!PageUptodate(page));
567
19e0c58f 568 status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
5f4e5752
CH
569 if (unlikely(status <= 0)) {
570 if (WARN_ON_ONCE(status == 0))
571 return -EIO;
572 return status;
573 }
574
575 cond_resched();
576
577 pos += status;
578 written += status;
579 length -= status;
580
581 balance_dirty_pages_ratelimited(inode->i_mapping);
582 } while (length);
583
584 return written;
585}
586
587int
588iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
8ff6daa1 589 const struct iomap_ops *ops)
5f4e5752
CH
590{
591 loff_t ret;
592
593 while (len) {
594 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
595 iomap_dirty_actor);
596 if (ret <= 0)
597 return ret;
598 pos += ret;
599 len -= ret;
600 }
601
602 return 0;
603}
604EXPORT_SYMBOL_GPL(iomap_file_dirty);
605
ae259a9c
CH
606static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
607 unsigned bytes, struct iomap *iomap)
608{
609 struct page *page;
610 int status;
611
c718a975
TH
612 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
613 iomap);
ae259a9c
CH
614 if (status)
615 return status;
616
617 zero_user(page, offset, bytes);
618 mark_page_accessed(page);
619
19e0c58f 620 return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
ae259a9c
CH
621}
622
9a286f0e
CH
623static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
624 struct iomap *iomap)
625{
57fc505d
CH
626 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
627 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
9a286f0e
CH
628}
629
ae259a9c
CH
630static loff_t
631iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
632 void *data, struct iomap *iomap)
633{
634 bool *did_zero = data;
635 loff_t written = 0;
636 int status;
637
638 /* already zeroed? we're done. */
639 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
640 return count;
641
642 do {
643 unsigned offset, bytes;
644
645 offset = pos & (PAGE_SIZE - 1); /* Within page */
e28ae8e4 646 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
ae259a9c 647
9a286f0e
CH
648 if (IS_DAX(inode))
649 status = iomap_dax_zero(pos, offset, bytes, iomap);
650 else
651 status = iomap_zero(inode, pos, offset, bytes, iomap);
ae259a9c
CH
652 if (status < 0)
653 return status;
654
655 pos += bytes;
656 count -= bytes;
657 written += bytes;
658 if (did_zero)
659 *did_zero = true;
660 } while (count > 0);
661
662 return written;
663}
664
665int
666iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
8ff6daa1 667 const struct iomap_ops *ops)
ae259a9c
CH
668{
669 loff_t ret;
670
671 while (len > 0) {
672 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
673 ops, did_zero, iomap_zero_range_actor);
674 if (ret <= 0)
675 return ret;
676
677 pos += ret;
678 len -= ret;
679 }
680
681 return 0;
682}
683EXPORT_SYMBOL_GPL(iomap_zero_range);
684
685int
686iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
8ff6daa1 687 const struct iomap_ops *ops)
ae259a9c 688{
93407472
FF
689 unsigned int blocksize = i_blocksize(inode);
690 unsigned int off = pos & (blocksize - 1);
ae259a9c
CH
691
692 /* Block boundary? Nothing to do */
693 if (!off)
694 return 0;
695 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
696}
697EXPORT_SYMBOL_GPL(iomap_truncate_page);
698
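/*
 * Sketch: a truncate path typically zeroes the new partial tail block with
 * iomap_truncate_page() before updating i_size, roughly like the
 * hypothetical helper below (foo_iomap_ops is an assumed name and the
 * surrounding locking is omitted).
 */
static int foo_truncate_tail(struct inode *inode, loff_t newsize)
{
	bool did_zero = false;

	return iomap_truncate_page(inode, newsize, &did_zero, &foo_iomap_ops);
}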
699static loff_t
700iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
701 void *data, struct iomap *iomap)
702{
703 struct page *page = data;
704 int ret;
705
c663e29f 706 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
ae259a9c
CH
707 if (ret)
708 return ret;
709
710 block_commit_write(page, 0, length);
711 return length;
712}
713
11bac800 714int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
ae259a9c
CH
715{
716 struct page *page = vmf->page;
11bac800 717 struct inode *inode = file_inode(vmf->vma->vm_file);
ae259a9c
CH
718 unsigned long length;
719 loff_t offset, size;
720 ssize_t ret;
721
722 lock_page(page);
723 size = i_size_read(inode);
724 if ((page->mapping != inode->i_mapping) ||
725 (page_offset(page) > size)) {
726 /* We overload EFAULT to mean page got truncated */
727 ret = -EFAULT;
728 goto out_unlock;
729 }
730
731 /* page is wholly or partially inside EOF */
732 if (((page->index + 1) << PAGE_SHIFT) > size)
733 length = size & ~PAGE_MASK;
734 else
735 length = PAGE_SIZE;
736
737 offset = page_offset(page);
738 while (length > 0) {
9484ab1b
JK
739 ret = iomap_apply(inode, offset, length,
740 IOMAP_WRITE | IOMAP_FAULT, ops, page,
741 iomap_page_mkwrite_actor);
ae259a9c
CH
742 if (unlikely(ret <= 0))
743 goto out_unlock;
744 offset += ret;
745 length -= ret;
746 }
747
748 set_page_dirty(page);
749 wait_for_stable_page(page);
e7647fb4 750 return VM_FAULT_LOCKED;
ae259a9c
CH
751out_unlock:
752 unlock_page(page);
e7647fb4 753 return block_page_mkwrite_return(ret);
ae259a9c
CH
754}
755EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
8be9f564
CH
756
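/*
 * Sketch of a write-fault handler built on iomap_page_mkwrite(); the foo_*
 * names are hypothetical and a real filesystem would take its own locks
 * around the call.
 */
static int foo_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	ret = iomap_page_mkwrite(vmf, &foo_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}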
757struct fiemap_ctx {
758 struct fiemap_extent_info *fi;
759 struct iomap prev;
760};
761
762static int iomap_to_fiemap(struct fiemap_extent_info *fi,
763 struct iomap *iomap, u32 flags)
764{
765 switch (iomap->type) {
766 case IOMAP_HOLE:
767 /* skip holes */
768 return 0;
769 case IOMAP_DELALLOC:
770 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
771 break;
19319b53
CH
772 case IOMAP_MAPPED:
773 break;
8be9f564
CH
774 case IOMAP_UNWRITTEN:
775 flags |= FIEMAP_EXTENT_UNWRITTEN;
776 break;
19319b53
CH
777 case IOMAP_INLINE:
778 flags |= FIEMAP_EXTENT_DATA_INLINE;
8be9f564
CH
779 break;
780 }
781
17de0a9f
CH
782 if (iomap->flags & IOMAP_F_MERGED)
783 flags |= FIEMAP_EXTENT_MERGED;
e43c460d
DW
784 if (iomap->flags & IOMAP_F_SHARED)
785 flags |= FIEMAP_EXTENT_SHARED;
17de0a9f 786
8be9f564 787 return fiemap_fill_next_extent(fi, iomap->offset,
19fe5f64 788 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
17de0a9f 789 iomap->length, flags);
8be9f564
CH
790}
791
792static loff_t
793iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
794 struct iomap *iomap)
795{
796 struct fiemap_ctx *ctx = data;
797 loff_t ret = length;
798
799 if (iomap->type == IOMAP_HOLE)
800 return length;
801
802 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
803 ctx->prev = *iomap;
804 switch (ret) {
805 case 0: /* success */
806 return length;
807 case 1: /* extent array full */
808 return 0;
809 default:
810 return ret;
811 }
812}
813
814int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
8ff6daa1 815 loff_t start, loff_t len, const struct iomap_ops *ops)
8be9f564
CH
816{
817 struct fiemap_ctx ctx;
818 loff_t ret;
819
820 memset(&ctx, 0, sizeof(ctx));
821 ctx.fi = fi;
822 ctx.prev.type = IOMAP_HOLE;
823
824 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
825 if (ret)
826 return ret;
827
8896b8f6
DC
828 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
829 ret = filemap_write_and_wait(inode->i_mapping);
830 if (ret)
831 return ret;
832 }
8be9f564
CH
833
834 while (len > 0) {
d33fd776 835 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
8be9f564 836 iomap_fiemap_actor);
ac2dc058
DC
837 /* inode with no (attribute) mapping will give ENOENT */
838 if (ret == -ENOENT)
839 break;
8be9f564
CH
840 if (ret < 0)
841 return ret;
842 if (ret == 0)
843 break;
844
845 start += ret;
846 len -= ret;
847 }
848
849 if (ctx.prev.type != IOMAP_HOLE) {
850 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
851 if (ret < 0)
852 return ret;
853 }
854
855 return 0;
856}
857EXPORT_SYMBOL_GPL(iomap_fiemap);
ff6a9292 858
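/*
 * Sketch of the ->fiemap inode operation forwarding to iomap_fiemap();
 * foo_fiemap() and foo_iomap_ops are hypothetical names.
 */
static int foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &foo_iomap_ops);
}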
8a78cb1f
CH
859/*
860 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
afd9d6a1 861 * Returns true if found and updates @lastoff to the offset in file.
8a78cb1f 862 */
afd9d6a1
CH
863static bool
864page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
865 int whence)
8a78cb1f 866{
afd9d6a1
CH
867 const struct address_space_operations *ops = inode->i_mapping->a_ops;
868 unsigned int bsize = i_blocksize(inode), off;
8a78cb1f 869 bool seek_data = whence == SEEK_DATA;
afd9d6a1 870 loff_t poff = page_offset(page);
8a78cb1f 871
afd9d6a1
CH
872 if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
873 return false;
8a78cb1f 874
afd9d6a1 875 if (*lastoff < poff) {
8a78cb1f 876 /*
afd9d6a1
CH
877 * Last offset smaller than the start of the page means we found
878 * a hole:
8a78cb1f 879 */
afd9d6a1
CH
880 if (whence == SEEK_HOLE)
881 return true;
882 *lastoff = poff;
883 }
8a78cb1f 884
afd9d6a1
CH
885 /*
886 * Just check the page unless we can and should check block ranges:
887 */
888 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
889 return PageUptodate(page) == seek_data;
890
891 lock_page(page);
892 if (unlikely(page->mapping != inode->i_mapping))
893 goto out_unlock_not_found;
894
895 for (off = 0; off < PAGE_SIZE; off += bsize) {
896 if ((*lastoff & ~PAGE_MASK) >= off + bsize)
897 continue;
898 if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
899 unlock_page(page);
900 return true;
901 }
902 *lastoff = poff + off + bsize;
903 }
904
905out_unlock_not_found:
906 unlock_page(page);
907 return false;
8a78cb1f
CH
908}
909
910/*
911 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
912 *
913 * Within unwritten extents, the page cache determines which parts are holes
bd56b3e1
CH
914 * and which are data: uptodate buffer heads count as data; everything else
915 * counts as a hole.
8a78cb1f
CH
916 *
917 * Returns the resulting offset on success, and -ENOENT otherwise.
918 */
919static loff_t
920page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
921 int whence)
922{
923 pgoff_t index = offset >> PAGE_SHIFT;
924 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
925 loff_t lastoff = offset;
926 struct pagevec pvec;
927
928 if (length <= 0)
929 return -ENOENT;
930
931 pagevec_init(&pvec);
932
933 do {
934 unsigned nr_pages, i;
935
936 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
937 end - 1);
938 if (nr_pages == 0)
939 break;
940
941 for (i = 0; i < nr_pages; i++) {
942 struct page *page = pvec.pages[i];
943
afd9d6a1 944 if (page_seek_hole_data(inode, page, &lastoff, whence))
8a78cb1f 945 goto check_range;
8a78cb1f
CH
946 lastoff = page_offset(page) + PAGE_SIZE;
947 }
948 pagevec_release(&pvec);
949 } while (index < end);
950
951 /* When there is no page at lastoff and we are not done, we found a hole. */
952 if (whence != SEEK_HOLE)
953 goto not_found;
954
955check_range:
956 if (lastoff < offset + length)
957 goto out;
958not_found:
959 lastoff = -ENOENT;
960out:
961 pagevec_release(&pvec);
962 return lastoff;
963}
964
965
0ed3b0d4
AG
966static loff_t
967iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
968 void *data, struct iomap *iomap)
969{
970 switch (iomap->type) {
971 case IOMAP_UNWRITTEN:
972 offset = page_cache_seek_hole_data(inode, offset, length,
973 SEEK_HOLE);
974 if (offset < 0)
975 return length;
976 /* fall through */
977 case IOMAP_HOLE:
978 *(loff_t *)data = offset;
979 return 0;
980 default:
981 return length;
982 }
983}
984
985loff_t
986iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
987{
988 loff_t size = i_size_read(inode);
989 loff_t length = size - offset;
990 loff_t ret;
991
d6ab17f2
DW
992 /* Nothing to be found before or beyond the end of the file. */
993 if (offset < 0 || offset >= size)
0ed3b0d4
AG
994 return -ENXIO;
995
996 while (length > 0) {
997 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
998 &offset, iomap_seek_hole_actor);
999 if (ret < 0)
1000 return ret;
1001 if (ret == 0)
1002 break;
1003
1004 offset += ret;
1005 length -= ret;
1006 }
1007
1008 return offset;
1009}
1010EXPORT_SYMBOL_GPL(iomap_seek_hole);
1011
1012static loff_t
1013iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
1014 void *data, struct iomap *iomap)
1015{
1016 switch (iomap->type) {
1017 case IOMAP_HOLE:
1018 return length;
1019 case IOMAP_UNWRITTEN:
1020 offset = page_cache_seek_hole_data(inode, offset, length,
1021 SEEK_DATA);
1022 if (offset < 0)
1023 return length;
1024 /*FALLTHRU*/
1025 default:
1026 *(loff_t *)data = offset;
1027 return 0;
1028 }
1029}
1030
1031loff_t
1032iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
1033{
1034 loff_t size = i_size_read(inode);
1035 loff_t length = size - offset;
1036 loff_t ret;
1037
d6ab17f2
DW
1038 /* Nothing to be found before or beyond the end of the file. */
1039 if (offset < 0 || offset >= size)
0ed3b0d4
AG
1040 return -ENXIO;
1041
1042 while (length > 0) {
1043 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
1044 &offset, iomap_seek_data_actor);
1045 if (ret < 0)
1046 return ret;
1047 if (ret == 0)
1048 break;
1049
1050 offset += ret;
1051 length -= ret;
1052 }
1053
1054 if (length <= 0)
1055 return -ENXIO;
1056 return offset;
1057}
1058EXPORT_SYMBOL_GPL(iomap_seek_data);
1059
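/*
 * Sketch of a ->llseek handler using the two seek helpers above; the foo_*
 * names are hypothetical and all other whence values fall back to the
 * generic code.
 */
static loff_t foo_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	switch (whence) {
	case SEEK_HOLE:
		offset = iomap_seek_hole(inode, offset, &foo_iomap_ops);
		break;
	case SEEK_DATA:
		offset = iomap_seek_data(inode, offset, &foo_iomap_ops);
		break;
	default:
		return generic_file_llseek(file, offset, whence);
	}

	if (offset < 0)
		return offset;
	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
}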
ff6a9292
CH
1060/*
1061 * Private flags for iomap_dio, must not overlap with the public ones in
1062 * iomap.h:
1063 */
3460cac1 1064#define IOMAP_DIO_WRITE_FUA (1 << 28)
4f8ff44b 1065#define IOMAP_DIO_NEED_SYNC (1 << 29)
ff6a9292
CH
1066#define IOMAP_DIO_WRITE (1 << 30)
1067#define IOMAP_DIO_DIRTY (1 << 31)
1068
1069struct iomap_dio {
1070 struct kiocb *iocb;
1071 iomap_dio_end_io_t *end_io;
1072 loff_t i_size;
1073 loff_t size;
1074 atomic_t ref;
1075 unsigned flags;
1076 int error;
ebf00be3 1077 bool wait_for_completion;
ff6a9292
CH
1078
1079 union {
1080 /* used during submission and for synchronous completion: */
1081 struct {
1082 struct iov_iter *iter;
1083 struct task_struct *waiter;
1084 struct request_queue *last_queue;
1085 blk_qc_t cookie;
1086 } submit;
1087
1088 /* used for aio completion: */
1089 struct {
1090 struct work_struct work;
1091 } aio;
1092 };
1093};
1094
1095static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1096{
1097 struct kiocb *iocb = dio->iocb;
332391a9 1098 struct inode *inode = file_inode(iocb->ki_filp);
5e25c269 1099 loff_t offset = iocb->ki_pos;
ff6a9292
CH
1100 ssize_t ret;
1101
1102 if (dio->end_io) {
1103 ret = dio->end_io(iocb,
1104 dio->error ? dio->error : dio->size,
1105 dio->flags);
1106 } else {
1107 ret = dio->error;
1108 }
1109
1110 if (likely(!ret)) {
1111 ret = dio->size;
1112 /* check for short read */
5e25c269 1113 if (offset + ret > dio->i_size &&
ff6a9292 1114 !(dio->flags & IOMAP_DIO_WRITE))
5e25c269 1115 ret = dio->i_size - offset;
ff6a9292
CH
1116 iocb->ki_pos += ret;
1117 }
1118
5e25c269
EG
1119 /*
1120 * Try again to invalidate clean pages which might have been cached by
1121 * non-direct readahead, or faulted in by get_user_pages() if the source
1122 * of the write was an mmap'ed region of the file we're writing. Either
1123 * one is a pretty crazy thing to do, so we don't support it 100%. If
1124 * this invalidation fails, tough, the write still worked...
1125 *
1126 * And this page cache invalidation has to be after dio->end_io(), as
1127 * some filesystems convert unwritten extents to real allocations in
1128 * end_io() when necessary, otherwise a racing buffer read would cache
1129 * zeros from unwritten extents.
1130 */
1131 if (!dio->error &&
1132 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1133 int err;
1134 err = invalidate_inode_pages2_range(inode->i_mapping,
1135 offset >> PAGE_SHIFT,
1136 (offset + dio->size - 1) >> PAGE_SHIFT);
5a9d929d
DW
1137 if (err)
1138 dio_warn_stale_pagecache(iocb->ki_filp);
5e25c269
EG
1139 }
1140
4f8ff44b
DC
1141 /*
1142 * If this is a DSYNC write, make sure we push it to stable storage now
1143 * that we've written data.
1144 */
1145 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1146 ret = generic_write_sync(iocb, ret);
1147
ff6a9292
CH
1148 inode_dio_end(file_inode(iocb->ki_filp));
1149 kfree(dio);
1150
1151 return ret;
1152}
1153
1154static void iomap_dio_complete_work(struct work_struct *work)
1155{
1156 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1157 struct kiocb *iocb = dio->iocb;
ff6a9292 1158
4f8ff44b 1159 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
ff6a9292
CH
1160}
1161
1162/*
1163 * Set an error in the dio if none is set yet. We have to use cmpxchg
1164 * as the submission context and the completion context(s) can race to
1165 * update the error.
1166 */
1167static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1168{
1169 cmpxchg(&dio->error, 0, ret);
1170}
1171
1172static void iomap_dio_bio_end_io(struct bio *bio)
1173{
1174 struct iomap_dio *dio = bio->bi_private;
1175 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
1176
4e4cbee9
CH
1177 if (bio->bi_status)
1178 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
ff6a9292
CH
1179
1180 if (atomic_dec_and_test(&dio->ref)) {
ebf00be3 1181 if (dio->wait_for_completion) {
ff6a9292 1182 struct task_struct *waiter = dio->submit.waiter;
ff6a9292
CH
1183 WRITE_ONCE(dio->submit.waiter, NULL);
1184 wake_up_process(waiter);
1185 } else if (dio->flags & IOMAP_DIO_WRITE) {
1186 struct inode *inode = file_inode(dio->iocb->ki_filp);
1187
1188 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
1189 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
1190 } else {
1191 iomap_dio_complete_work(&dio->aio.work);
1192 }
1193 }
1194
1195 if (should_dirty) {
1196 bio_check_pages_dirty(bio);
1197 } else {
1198 struct bio_vec *bvec;
1199 int i;
1200
1201 bio_for_each_segment_all(bvec, bio, i)
1202 put_page(bvec->bv_page);
1203 bio_put(bio);
1204 }
1205}
1206
1207static blk_qc_t
1208iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
1209 unsigned len)
1210{
1211 struct page *page = ZERO_PAGE(0);
1212 struct bio *bio;
1213
1214 bio = bio_alloc(GFP_KERNEL, 1);
74d46992 1215 bio_set_dev(bio, iomap->bdev);
57fc505d 1216 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
ff6a9292
CH
1217 bio->bi_private = dio;
1218 bio->bi_end_io = iomap_dio_bio_end_io;
1219
1220 get_page(page);
6533b4e4 1221 __bio_add_page(bio, page, len, 0);
5cc60aee 1222 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
ff6a9292
CH
1223
1224 atomic_inc(&dio->ref);
1225 return submit_bio(bio);
1226}
1227
1228static loff_t
1229iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
1230 void *data, struct iomap *iomap)
1231{
1232 struct iomap_dio *dio = data;
93407472
FF
1233 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
1234 unsigned int fs_block_size = i_blocksize(inode), pad;
1235 unsigned int align = iov_iter_alignment(dio->submit.iter);
ff6a9292
CH
1236 struct iov_iter iter;
1237 struct bio *bio;
1238 bool need_zeroout = false;
3460cac1 1239 bool use_fua = false;
ff6a9292 1240 int nr_pages, ret;
cfe057f7 1241 size_t copied = 0;
ff6a9292
CH
1242
1243 if ((pos | length | align) & ((1 << blkbits) - 1))
1244 return -EINVAL;
1245
1246 switch (iomap->type) {
1247 case IOMAP_HOLE:
1248 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
1249 return -EIO;
1250 /*FALLTHRU*/
1251 case IOMAP_UNWRITTEN:
1252 if (!(dio->flags & IOMAP_DIO_WRITE)) {
cfe057f7 1253 length = iov_iter_zero(length, dio->submit.iter);
ff6a9292
CH
1254 dio->size += length;
1255 return length;
1256 }
1257 dio->flags |= IOMAP_DIO_UNWRITTEN;
1258 need_zeroout = true;
1259 break;
1260 case IOMAP_MAPPED:
1261 if (iomap->flags & IOMAP_F_SHARED)
1262 dio->flags |= IOMAP_DIO_COW;
3460cac1 1263 if (iomap->flags & IOMAP_F_NEW) {
ff6a9292 1264 need_zeroout = true;
3460cac1
DC
1265 } else {
1266 /*
1267 * Use a FUA write if we need datasync semantics, this
1268 * is a pure data IO that doesn't require any metadata
1269 * updates and the underlying device supports FUA. This
1270 * allows us to avoid cache flushes on IO completion.
1271 */
1272 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1273 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1274 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1275 use_fua = true;
1276 }
ff6a9292
CH
1277 break;
1278 default:
1279 WARN_ON_ONCE(1);
1280 return -EIO;
1281 }
1282
1283 /*
1284 * Operate on a partial iter trimmed to the extent we were called for.
1285 * We'll update the iter in the dio once we're done with this extent.
1286 */
1287 iter = *dio->submit.iter;
1288 iov_iter_truncate(&iter, length);
1289
1290 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1291 if (nr_pages <= 0)
1292 return nr_pages;
1293
1294 if (need_zeroout) {
1295 /* zero out from the start of the block to the write offset */
1296 pad = pos & (fs_block_size - 1);
1297 if (pad)
1298 iomap_dio_zero(dio, iomap, pos - pad, pad);
1299 }
1300
1301 do {
cfe057f7
AV
1302 size_t n;
1303 if (dio->error) {
1304 iov_iter_revert(dio->submit.iter, copied);
ff6a9292 1305 return 0;
cfe057f7 1306 }
ff6a9292
CH
1307
1308 bio = bio_alloc(GFP_KERNEL, nr_pages);
74d46992 1309 bio_set_dev(bio, iomap->bdev);
57fc505d 1310 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
45d06cf7 1311 bio->bi_write_hint = dio->iocb->ki_hint;
087e5669 1312 bio->bi_ioprio = dio->iocb->ki_ioprio;
ff6a9292
CH
1313 bio->bi_private = dio;
1314 bio->bi_end_io = iomap_dio_bio_end_io;
1315
1316 ret = bio_iov_iter_get_pages(bio, &iter);
1317 if (unlikely(ret)) {
1318 bio_put(bio);
cfe057f7 1319 return copied ? copied : ret;
ff6a9292
CH
1320 }
1321
cfe057f7 1322 n = bio->bi_iter.bi_size;
ff6a9292 1323 if (dio->flags & IOMAP_DIO_WRITE) {
3460cac1
DC
1324 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1325 if (use_fua)
1326 bio->bi_opf |= REQ_FUA;
1327 else
1328 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
cfe057f7 1329 task_io_account_write(n);
ff6a9292 1330 } else {
3460cac1 1331 bio->bi_opf = REQ_OP_READ;
ff6a9292
CH
1332 if (dio->flags & IOMAP_DIO_DIRTY)
1333 bio_set_pages_dirty(bio);
1334 }
1335
cfe057f7
AV
1336 iov_iter_advance(dio->submit.iter, n);
1337
1338 dio->size += n;
1339 pos += n;
1340 copied += n;
ff6a9292
CH
1341
1342 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1343
1344 atomic_inc(&dio->ref);
1345
1346 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1347 dio->submit.cookie = submit_bio(bio);
1348 } while (nr_pages);
1349
1350 if (need_zeroout) {
1351 /* zero out from the end of the write to the end of the block */
1352 pad = pos & (fs_block_size - 1);
1353 if (pad)
1354 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1355 }
cfe057f7 1356 return copied;
ff6a9292
CH
1357}
1358
4f8ff44b
DC
1359/*
1360 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
3460cac1
DC
1361 * is being issued as AIO or not. This allows us to optimise pure data writes
1362 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1363 * REQ_FLUSH post write. This is slightly tricky because a single request here
1364 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1365 * may be pure data writes. In that case, we still need to do a full data sync
1366 * completion.
4f8ff44b 1367 */
ff6a9292 1368ssize_t
8ff6daa1
CH
1369iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1370 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
ff6a9292
CH
1371{
1372 struct address_space *mapping = iocb->ki_filp->f_mapping;
1373 struct inode *inode = file_inode(iocb->ki_filp);
1374 size_t count = iov_iter_count(iter);
c771c14b
EG
1375 loff_t pos = iocb->ki_pos, start = pos;
1376 loff_t end = iocb->ki_pos + count - 1, ret = 0;
ff6a9292
CH
1377 unsigned int flags = IOMAP_DIRECT;
1378 struct blk_plug plug;
1379 struct iomap_dio *dio;
1380
1381 lockdep_assert_held(&inode->i_rwsem);
1382
1383 if (!count)
1384 return 0;
1385
1386 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1387 if (!dio)
1388 return -ENOMEM;
1389
1390 dio->iocb = iocb;
1391 atomic_set(&dio->ref, 1);
1392 dio->size = 0;
1393 dio->i_size = i_size_read(inode);
1394 dio->end_io = end_io;
1395 dio->error = 0;
1396 dio->flags = 0;
ebf00be3 1397 dio->wait_for_completion = is_sync_kiocb(iocb);
ff6a9292
CH
1398
1399 dio->submit.iter = iter;
ebf00be3
AG
1400 dio->submit.waiter = current;
1401 dio->submit.cookie = BLK_QC_T_NONE;
1402 dio->submit.last_queue = NULL;
ff6a9292
CH
1403
1404 if (iov_iter_rw(iter) == READ) {
1405 if (pos >= dio->i_size)
1406 goto out_free_dio;
1407
1408 if (iter->type == ITER_IOVEC)
1409 dio->flags |= IOMAP_DIO_DIRTY;
1410 } else {
3460cac1 1411 flags |= IOMAP_WRITE;
ff6a9292 1412 dio->flags |= IOMAP_DIO_WRITE;
3460cac1
DC
1413
1414 /* for data sync or sync, we need sync completion processing */
4f8ff44b
DC
1415 if (iocb->ki_flags & IOCB_DSYNC)
1416 dio->flags |= IOMAP_DIO_NEED_SYNC;
3460cac1
DC
1417
1418 /*
1419 * For datasync only writes, we optimistically try using FUA for
1420 * this IO. Any non-FUA write that occurs will clear this flag,
1421 * hence we know before completion whether a cache flush is
1422 * necessary.
1423 */
1424 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1425 dio->flags |= IOMAP_DIO_WRITE_FUA;
ff6a9292
CH
1426 }
1427
a38d1243
GR
1428 if (iocb->ki_flags & IOCB_NOWAIT) {
1429 if (filemap_range_has_page(mapping, start, end)) {
1430 ret = -EAGAIN;
1431 goto out_free_dio;
1432 }
1433 flags |= IOMAP_NOWAIT;
1434 }
1435
55635ba7
AR
1436 ret = filemap_write_and_wait_range(mapping, start, end);
1437 if (ret)
1438 goto out_free_dio;
ff6a9292 1439
5a9d929d
DW
1440 /*
1441 * Try to invalidate cache pages for the range we're direct
1442 * writing. If this invalidation fails, tough, the write will
1443 * still work, but racing two incompatible write paths is a
1444 * pretty crazy thing to do, so we don't support it 100%.
1445 */
55635ba7
AR
1446 ret = invalidate_inode_pages2_range(mapping,
1447 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
5a9d929d
DW
1448 if (ret)
1449 dio_warn_stale_pagecache(iocb->ki_filp);
55635ba7 1450 ret = 0;
ff6a9292 1451
ebf00be3 1452 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
546e7be8
CR
1453 !inode->i_sb->s_dio_done_wq) {
1454 ret = sb_init_dio_done_wq(inode->i_sb);
1455 if (ret < 0)
1456 goto out_free_dio;
1457 }
1458
ff6a9292
CH
1459 inode_dio_begin(inode);
1460
1461 blk_start_plug(&plug);
1462 do {
1463 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1464 iomap_dio_actor);
1465 if (ret <= 0) {
1466 /* magic error code to fall back to buffered I/O */
ebf00be3
AG
1467 if (ret == -ENOTBLK) {
1468 dio->wait_for_completion = true;
ff6a9292 1469 ret = 0;
ebf00be3 1470 }
ff6a9292
CH
1471 break;
1472 }
1473 pos += ret;
a008c31c
CR
1474
1475 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1476 break;
ff6a9292
CH
1477 } while ((count = iov_iter_count(iter)) > 0);
1478 blk_finish_plug(&plug);
1479
1480 if (ret < 0)
1481 iomap_dio_set_error(dio, ret);
1482
3460cac1
DC
1483 /*
1484 * If all the writes we issued were FUA, we don't need to flush the
1485 * cache on IO completion. Clear the sync flag for this case.
1486 */
1487 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1488 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1489
ff6a9292 1490 if (!atomic_dec_and_test(&dio->ref)) {
ebf00be3 1491 if (!dio->wait_for_completion)
ff6a9292
CH
1492 return -EIOCBQUEUED;
1493
1494 for (;;) {
1495 set_current_state(TASK_UNINTERRUPTIBLE);
1496 if (!READ_ONCE(dio->submit.waiter))
1497 break;
1498
1499 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1500 !dio->submit.last_queue ||
ea435e1b 1501 !blk_poll(dio->submit.last_queue,
5cc60aee 1502 dio->submit.cookie))
ff6a9292
CH
1503 io_schedule();
1504 }
1505 __set_current_state(TASK_RUNNING);
1506 }
1507
c771c14b
EG
1508 ret = iomap_dio_complete(dio);
1509
c771c14b 1510 return ret;
ff6a9292
CH
1511
1512out_free_dio:
1513 kfree(dio);
1514 return ret;
1515}
1516EXPORT_SYMBOL_GPL(iomap_dio_rw);
67482129
DW
1517
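/*
 * Sketch of a direct read path built on iomap_dio_rw().  The foo_* names
 * are assumptions, a NULL end_io callback is allowed, and a write path
 * would normally pass a filesystem end_io callback and take i_rwsem
 * exclusively.
 */
static ssize_t foo_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to do for zero-length reads */

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL);
	inode_unlock_shared(inode);
	return ret;
}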
1518/* Swapfile activation */
1519
1520#ifdef CONFIG_SWAP
1521struct iomap_swapfile_info {
1522 struct iomap iomap; /* accumulated iomap */
1523 struct swap_info_struct *sis;
1524 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
1525 uint64_t highest_ppage; /* highest physical addr seen (pages) */
1526 unsigned long nr_pages; /* number of pages collected */
1527 int nr_extents; /* extent count */
1528};
1529
1530/*
1531 * Collect physical extents for this swap file. Physical extents reported to
1532 * the swap code must be trimmed to align to a page boundary. The logical
1533 * offset within the file is irrelevant since the swapfile code maps logical
1534 * page numbers of the swap device to the physical page-aligned extents.
1535 */
1536static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
1537{
1538 struct iomap *iomap = &isi->iomap;
1539 unsigned long nr_pages;
1540 uint64_t first_ppage;
1541 uint64_t first_ppage_reported;
1542 uint64_t next_ppage;
1543 int error;
1544
1545 /*
1546 * Round the start up and the end down so that the physical
1547 * extent aligns to a page boundary.
1548 */
1549 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
1550 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
1551 PAGE_SHIFT;
1552
1553 /* Skip too-short physical extents. */
1554 if (first_ppage >= next_ppage)
1555 return 0;
1556 nr_pages = next_ppage - first_ppage;
1557
1558 /*
1559 * Calculate how much swap space we're adding; the first page contains
1560 * the swap header and doesn't count. The mm still wants that first
1561 * page fed to add_swap_extent, however.
1562 */
1563 first_ppage_reported = first_ppage;
1564 if (iomap->offset == 0)
1565 first_ppage_reported++;
1566 if (isi->lowest_ppage > first_ppage_reported)
1567 isi->lowest_ppage = first_ppage_reported;
1568 if (isi->highest_ppage < (next_ppage - 1))
1569 isi->highest_ppage = next_ppage - 1;
1570
1571 /* Add extent, set up for the next call. */
1572 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
1573 if (error < 0)
1574 return error;
1575 isi->nr_extents += error;
1576 isi->nr_pages += nr_pages;
1577 return 0;
1578}
1579
1580/*
1581 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
1582 * swap only cares about contiguous page-aligned physical extents and makes no
1583 * distinction between written and unwritten extents.
1584 */
1585static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
1586 loff_t count, void *data, struct iomap *iomap)
1587{
1588 struct iomap_swapfile_info *isi = data;
1589 int error;
1590
19319b53
CH
1591 switch (iomap->type) {
1592 case IOMAP_MAPPED:
1593 case IOMAP_UNWRITTEN:
1594 /* Only real or unwritten extents. */
1595 break;
1596 case IOMAP_INLINE:
1597 /* No inline data. */
ec601924
OS
1598 pr_err("swapon: file is inline\n");
1599 return -EINVAL;
19319b53 1600 default:
ec601924
OS
1601 pr_err("swapon: file has unallocated extents\n");
1602 return -EINVAL;
1603 }
67482129 1604
ec601924
OS
1605 /* No uncommitted metadata or shared blocks. */
1606 if (iomap->flags & IOMAP_F_DIRTY) {
1607 pr_err("swapon: file is not committed\n");
1608 return -EINVAL;
1609 }
1610 if (iomap->flags & IOMAP_F_SHARED) {
1611 pr_err("swapon: file has shared extents\n");
1612 return -EINVAL;
1613 }
67482129 1614
ec601924
OS
1615 /* Only one bdev per swap file. */
1616 if (iomap->bdev != isi->sis->bdev) {
1617 pr_err("swapon: file is on multiple devices\n");
1618 return -EINVAL;
1619 }
67482129
DW
1620
1621 if (isi->iomap.length == 0) {
1622 /* No accumulated extent, so just store it. */
1623 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
1624 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
1625 /* Append this to the accumulated extent. */
1626 isi->iomap.length += iomap->length;
1627 } else {
1628 /* Otherwise, add the retained iomap and store this one. */
1629 error = iomap_swapfile_add_extent(isi);
1630 if (error)
1631 return error;
1632 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
1633 }
67482129 1634 return count;
67482129
DW
1635}
1636
1637/*
1638 * Iterate a swap file's iomaps to construct physical extents that can be
1639 * passed to the swapfile subsystem.
1640 */
1641int iomap_swapfile_activate(struct swap_info_struct *sis,
1642 struct file *swap_file, sector_t *pagespan,
1643 const struct iomap_ops *ops)
1644{
1645 struct iomap_swapfile_info isi = {
1646 .sis = sis,
1647 .lowest_ppage = (sector_t)-1ULL,
1648 };
1649 struct address_space *mapping = swap_file->f_mapping;
1650 struct inode *inode = mapping->host;
1651 loff_t pos = 0;
1652 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
1653 loff_t ret;
1654
117a148f
DW
1655 /*
1656 * Persist all file mapping metadata so that we won't have any
1657 * IOMAP_F_DIRTY iomaps.
1658 */
1659 ret = vfs_fsync(swap_file, 1);
67482129
DW
1660 if (ret)
1661 return ret;
1662
1663 while (len > 0) {
1664 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
1665 ops, &isi, iomap_swapfile_activate_actor);
1666 if (ret <= 0)
1667 return ret;
1668
1669 pos += ret;
1670 len -= ret;
1671 }
1672
1673 if (isi.iomap.length) {
1674 ret = iomap_swapfile_add_extent(&isi);
1675 if (ret)
1676 return ret;
1677 }
1678
1679 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
1680 sis->max = isi.nr_pages;
1681 sis->pages = isi.nr_pages - 1;
1682 sis->highest_bit = isi.nr_pages - 1;
1683 return isi.nr_extents;
1684}
1685EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
1686#endif /* CONFIG_SWAP */
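/*
 * Sketch: the address_space_operations ->swap_activate hook simply forwards
 * to iomap_swapfile_activate() with the filesystem's iomap ops;
 * foo_swap_activate() and foo_iomap_ops are hypothetical.
 */
#ifdef CONFIG_SWAP
static int foo_swap_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span, &foo_iomap_ops);
}
#endif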
89eb1906
CH
1687
1688static loff_t
1689iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
1690 void *data, struct iomap *iomap)
1691{
1692 sector_t *bno = data, addr;
1693
1694 if (iomap->type == IOMAP_MAPPED) {
1695 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
1696 if (addr > INT_MAX)
1697 WARN(1, "would truncate bmap result\n");
1698 else
1699 *bno = addr;
1700 }
1701 return 0;
1702}
1703
1704/* legacy ->bmap interface. 0 is the error return (!) */
1705sector_t
1706iomap_bmap(struct address_space *mapping, sector_t bno,
1707 const struct iomap_ops *ops)
1708{
1709 struct inode *inode = mapping->host;
1710 loff_t pos = bno >> inode->i_blkbits;
1711 unsigned blocksize = i_blocksize(inode);
1712
1713 if (filemap_write_and_wait(mapping))
1714 return 0;
1715
1716 bno = 0;
1717 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
1718 return bno;
1719}
1720EXPORT_SYMBOL_GPL(iomap_bmap);
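/*
 * Sketch of the legacy ->bmap address_space operation on top of
 * iomap_bmap(); foo_bmap() and foo_iomap_ops are assumed names.
 */
static sector_t foo_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &foo_iomap_ops);
}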