Git Repo - J-linux.git/commitdiff
Merge tag 'iomap-5.20-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
author Linus Torvalds <[email protected]>
Wed, 3 Aug 2022 22:16:49 +0000 (15:16 -0700)
committer Linus Torvalds <[email protected]>
Wed, 3 Aug 2022 22:16:49 +0000 (15:16 -0700)
Pull iomap updates from Darrick Wong:
 "The most notable change in this first batch is that we no longer
  schedule pages beyond i_size for writeback, preferring instead to let
  truncate deal with those pages.

  Next week, there may be a second pull request to remove
  iomap_writepage from the other two filesystems (gfs2/zonefs) that use
  iomap for buffered IO. This follows in the same vein as the recent
  removal of writepage from XFS, since it hasn't been triggered in a few
  years; it does nothing during direct reclaim; and as far as the people
  who examined the patchset can tell, it's moving the codebase in the
  right direction.

  However, as it was a late addition to for-next, I'm holding off on
  that section for another week of testing to see if anyone can come up
  with a solid reason for holding off in the meantime.

  Summary:

   - Skip writeback for pages that are completely beyond EOF

   - Minor code cleanups"

* tag 'iomap-5.20-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  dax: set did_zero to true when zeroing successfully
  iomap: set did_zero to true when zeroing successfully
  iomap: skip pages past eof in iomap_do_writepage()
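
The did_zero patches in the shortlog only change how zeroing success is reported to callers. As a rough caller-side illustration (a hypothetical filesystem helper, not code from this merge; example_zero_tail, the ops argument's origin, and the pr_debug message are made up), a filesystem zeroing a sub-block range can make follow-up work conditional on did_zero, which after this fix is set once a zeroing pass succeeds:

  #include <linux/iomap.h>

  /* Hypothetical helper: zero a byte range and report whether anything
   * actually had to be zeroed. */
  static int example_zero_tail(struct inode *inode, loff_t pos, loff_t len,
                  const struct iomap_ops *ops)
  {
          bool did_zero = false;
          int error;

          error = iomap_zero_range(inode, pos, len, &did_zero, ops);
          if (error)
                  return error;

          /* did_zero is true after a successful zeroing pass, so callers
           * can key transactions or flushes off it. */
          if (did_zero)
                  pr_debug("zeroed %lld bytes at %lld\n", len, pos);
          return 0;
  }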

fs/iomap/buffered-io.c

diff --combined fs/iomap/buffered-io.c
index 6505d45a99e098c059d8dee7839fcc7b87fef176,afd260632836d9d4f9dc48655c128e4934459051..2b82c7f1de88b847cde1feab3f42937170bd3996
@@@ -44,28 -44,20 +44,28 @@@ static inline struct iomap_page *to_iom
  static struct bio_set iomap_ioend_bioset;
  
  static struct iomap_page *
 -iomap_page_create(struct inode *inode, struct folio *folio)
 +iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
  {
        struct iomap_page *iop = to_iomap_page(folio);
        unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
 +      gfp_t gfp;
  
        if (iop || nr_blocks <= 1)
                return iop;
  
 +      if (flags & IOMAP_NOWAIT)
 +              gfp = GFP_NOWAIT;
 +      else
 +              gfp = GFP_NOFS | __GFP_NOFAIL;
 +
        iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
 -                      GFP_NOFS | __GFP_NOFAIL);
 -      spin_lock_init(&iop->uptodate_lock);
 -      if (folio_test_uptodate(folio))
 -              bitmap_fill(iop->uptodate, nr_blocks);
 -      folio_attach_private(folio, iop);
 +                    gfp);
 +      if (iop) {
 +              spin_lock_init(&iop->uptodate_lock);
 +              if (folio_test_uptodate(folio))
 +                      bitmap_fill(iop->uptodate, nr_blocks);
 +              folio_attach_private(folio, iop);
 +      }
        return iop;
  }
  
@@@ -162,6 -154,9 +162,6 @@@ static void iomap_iop_set_range_uptodat
  static void iomap_set_range_uptodate(struct folio *folio,
                struct iomap_page *iop, size_t off, size_t len)
  {
 -      if (folio_test_error(folio))
 -              return;
 -
        if (iop)
                iomap_iop_set_range_uptodate(folio, iop, off, len);
        else
@@@ -231,7 -226,7 +231,7 @@@ static int iomap_read_inline_data(cons
        if (WARN_ON_ONCE(size > iomap->length))
                return -EIO;
        if (offset > 0)
 -              iop = iomap_page_create(iter->inode, folio);
 +              iop = iomap_page_create(iter->inode, folio, iter->flags);
        else
                iop = to_iomap_page(folio);
  
@@@ -269,7 -264,7 +269,7 @@@ static loff_t iomap_readpage_iter(cons
                return iomap_read_inline_data(iter, folio);
  
        /* zero post-eof blocks as the page may be mapped */
 -      iop = iomap_page_create(iter->inode, folio);
 +      iop = iomap_page_create(iter->inode, folio, iter->flags);
        iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
        if (plen == 0)
                goto done;
@@@ -497,6 -492,31 +497,6 @@@ void iomap_invalidate_folio(struct foli
  }
  EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
  
 -#ifdef CONFIG_MIGRATION
 -int
 -iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 -              struct page *page, enum migrate_mode mode)
 -{
 -      struct folio *folio = page_folio(page);
 -      struct folio *newfolio = page_folio(newpage);
 -      int ret;
 -
 -      ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
 -      if (ret != MIGRATEPAGE_SUCCESS)
 -              return ret;
 -
 -      if (folio_test_private(folio))
 -              folio_attach_private(newfolio, folio_detach_private(folio));
 -
 -      if (mode != MIGRATE_SYNC_NO_COPY)
 -              folio_migrate_copy(newfolio, folio);
 -      else
 -              folio_migrate_flags(newfolio, folio);
 -      return MIGRATEPAGE_SUCCESS;
 -}
 -EXPORT_SYMBOL_GPL(iomap_migrate_page);
 -#endif /* CONFIG_MIGRATION */
 -
  static void
  iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
  {
@@@ -527,11 -547,10 +527,11 @@@ static int __iomap_write_begin(const st
                size_t len, struct folio *folio)
  {
        const struct iomap *srcmap = iomap_iter_srcmap(iter);
 -      struct iomap_page *iop = iomap_page_create(iter->inode, folio);
 +      struct iomap_page *iop;
        loff_t block_size = i_blocksize(iter->inode);
        loff_t block_start = round_down(pos, block_size);
        loff_t block_end = round_up(pos + len, block_size);
 +      unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
        size_t from = offset_in_folio(folio, pos), to = from + len;
        size_t poff, plen;
  
                return 0;
        folio_clear_error(folio);
  
 +      iop = iomap_page_create(iter->inode, folio, iter->flags);
 +      if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
 +              return -EAGAIN;
 +
        do {
                iomap_adjust_read_range(iter->inode, folio, &block_start,
                                block_end - block_start, &poff, &plen);
                                return -EIO;
                        folio_zero_segments(folio, poff, from, to, poff + plen);
                } else {
 -                      int status = iomap_read_folio_sync(block_start, folio,
 +                      int status;
 +
 +                      if (iter->flags & IOMAP_NOWAIT)
 +                              return -EAGAIN;
 +
 +                      status = iomap_read_folio_sync(block_start, folio,
                                        poff, plen, srcmap);
                        if (status)
                                return status;
@@@ -593,9 -603,6 +593,9 @@@ static int iomap_write_begin(const stru
        unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
        int status = 0;
  
 +      if (iter->flags & IOMAP_NOWAIT)
 +              fgp |= FGP_NOWAIT;
 +
        BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
        if (srcmap != &iter->iomap)
                BUG_ON(pos + len > srcmap->offset + srcmap->length);
        folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
                        fgp, mapping_gfp_mask(iter->inode->i_mapping));
        if (!folio) {
 -              status = -ENOMEM;
 +              status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
                goto out_no_page;
        }
        if (pos + len > folio_pos(folio) + folio_size(folio))
@@@ -733,8 -740,6 +733,8 @@@ static loff_t iomap_write_iter(struct i
        loff_t pos = iter->pos;
        ssize_t written = 0;
        long status = 0;
 +      struct address_space *mapping = iter->inode->i_mapping;
 +      unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
  
        do {
                struct folio *folio;
                bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_count(i));
  again:
 +              status = balance_dirty_pages_ratelimited_flags(mapping,
 +                                                             bdp_flags);
 +              if (unlikely(status))
 +                      break;
 +
                if (bytes > length)
                        bytes = length;
  
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
 +               *
 +               * For async buffered writes the assumption is that the user
 +               * page has already been faulted in. This can be optimized by
 +               * faulting the user page.
                 */
                if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
                        status = -EFAULT;
                        break;
  
                page = folio_file_page(folio, pos >> PAGE_SHIFT);
 -              if (mapping_writably_mapped(iter->inode->i_mapping))
 +              if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
  
                copied = copy_page_from_iter_atomic(page, offset, bytes, i);
                pos += status;
                written += status;
                length -= status;
 -
 -              balance_dirty_pages_ratelimited(iter->inode->i_mapping);
        } while (iov_iter_count(i) && length);
  
 +      if (status == -EAGAIN) {
 +              iov_iter_revert(i, written);
 +              return -EAGAIN;
 +      }
        return written ? written : status;
  }
  
@@@ -821,9 -815,6 +821,9 @@@ iomap_file_buffered_write(struct kiocb 
        };
        int ret;
  
 +      if (iocb->ki_flags & IOCB_NOWAIT)
 +              iter.flags |= IOMAP_NOWAIT;
 +
        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = iomap_write_iter(&iter, i);
        if (iter.pos == iocb->ki_pos)
@@@ -926,10 -917,10 +926,10 @@@ static loff_t iomap_zero_iter(struct io
                pos += bytes;
                length -= bytes;
                written += bytes;
-               if (did_zero)
-                       *did_zero = true;
        } while (length > 0);
  
+       if (did_zero)
+               *did_zero = true;
        return written;
  }
  
@@@ -1338,7 -1329,7 +1338,7 @@@ iomap_writepage_map(struct iomap_writep
                struct writeback_control *wbc, struct inode *inode,
                struct folio *folio, u64 end_pos)
  {
 -      struct iomap_page *iop = iomap_page_create(inode, folio);
 +      struct iomap_page *iop = iomap_page_create(inode, folio, 0);
        struct iomap_ioend *ioend, *next;
        unsigned len = i_blocksize(inode);
        unsigned nblocks = i_blocks_per_folio(inode, folio);
@@@ -1487,10 -1478,10 +1487,10 @@@ iomap_do_writepage(struct page *page, s
                pgoff_t end_index = isize >> PAGE_SHIFT;
  
                /*
-                * Skip the page if it's fully outside i_size, e.g. due to a
-                * truncate operation that's in progress. We must redirty the
-                * page so that reclaim stops reclaiming it. Otherwise
-                * iomap_release_folio() is called on it and gets confused.
+                * Skip the page if it's fully outside i_size, e.g.
+                * due to a truncate operation that's in progress.  We've
+                * cleaned this page and truncate will finish things off for
+                * us.
                 *
                 * Note that the end_index is unsigned long.  If the given
                 * offset is greater than 16TB on a 32-bit system then if we
                 */
                if (folio->index > end_index ||
                    (folio->index == end_index && poff == 0))
-                       goto redirty;
+                       goto unlock;
  
                /*
                 * The page straddles i_size.  It must be zeroed out on each
  
  redirty:
        folio_redirty_for_writepage(wbc, folio);
+ unlock:
        folio_unlock(folio);
        return 0;
  }
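
Much of the surrounding churn in this combined diff (the IOMAP_NOWAIT, FGP_NOWAIT and -EAGAIN plumbing) appears to come in from the other merge parent's async buffered write work rather than from the iomap tag itself. From user space, the visible effect is that buffered writes issued with RWF_NOWAIT are attempted without blocking and return EAGAIN when they would have to wait (folio allocation, a read of a partial block, dirty-page throttling). A minimal, hypothetical user-space illustration, not part of this merge (the file name is made up and error handling is trimmed):

  #define _GNU_SOURCE
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/uio.h>
  #include <unistd.h>

  int main(void)
  {
          char buf[4096] = { 0 };
          struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
          int fd = open("testfile", O_WRONLY | O_CREAT, 0644);
          ssize_t ret;

          if (fd < 0)
                  return 1;

          /* Ask the kernel not to block; the buffered write path may now
           * return EAGAIN where it previously slept. */
          ret = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
          if (ret < 0 && errno == EAGAIN)
                  fprintf(stderr, "write would block; retry without RWF_NOWAIT\n");

          close(fd);
          return 0;
  }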