Git Repo - linux.git/commitdiff
Merge tag 'dio_for_v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack...
authorLinus Torvalds <[email protected]>
Thu, 15 Oct 2020 22:03:10 +0000 (15:03 -0700)
committerLinus Torvalds <[email protected]>
Thu, 15 Oct 2020 22:03:10 +0000 (15:03 -0700)
Pull direct-io fix from Jan Kara:
 "Fix for unaligned direct IO read past EOF in legacy DIO code"

* tag 'dio_for_v5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  direct-io: defer alignment check until after the EOF check
  direct-io: don't force writeback for reads beyond EOF
  direct-io: clean up error paths of do_blockdev_direct_IO

1  2 
fs/direct-io.c

diff --combined fs/direct-io.c
index abf535b036ab7789ef504fae9105d16a0a80bc0f,82838cca934b26f095867894c934e8a5ab26bd55..d53fa92a1ab656ebc13f4ce5660be5c3f0252999
@@@ -386,6 -386,25 +386,6 @@@ static void dio_bio_end_io(struct bio *
        spin_unlock_irqrestore(&dio->bio_lock, flags);
  }
  
 -/**
 - * dio_end_io - handle the end io action for the given bio
 - * @bio: The direct io bio thats being completed
 - *
 - * This is meant to be called by any filesystem that uses their own dio_submit_t
 - * so that the DIO specific endio actions are dealt with after the filesystem
 - * has done it's completion work.
 - */
 -void dio_end_io(struct bio *bio)
 -{
 -      struct dio *dio = bio->bi_private;
 -
 -      if (dio->is_async)
 -              dio_bio_end_aio(bio);
 -      else
 -              dio_bio_end_io(bio);
 -}
 -EXPORT_SYMBOL_GPL(dio_end_io);
 -
  static inline void
  dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
              struct block_device *bdev,
@@@ -1146,22 -1165,13 +1146,13 @@@ do_blockdev_direct_IO(struct kiocb *ioc
         * the early prefetch in the caller enough time.
         */
  
-       if (align & blocksize_mask) {
-               if (bdev)
-                       blkbits = blksize_bits(bdev_logical_block_size(bdev));
-               blocksize_mask = (1 << blkbits) - 1;
-               if (align & blocksize_mask)
-                       goto out;
-       }
        /* watch out for a 0 len io from a tricksy fs */
        if (iov_iter_rw(iter) == READ && !count)
                return 0;
  
        dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-       retval = -ENOMEM;
        if (!dio)
-               goto out;
+               return -ENOMEM;
        /*
         * Believe it or not, zeroing out the page array caused a .5%
         * performance regression in a database benchmark.  So, we take
        memset(dio, 0, offsetof(struct dio, pages));
  
        dio->flags = flags;
-       if (dio->flags & DIO_LOCKING) {
-               if (iov_iter_rw(iter) == READ) {
-                       struct address_space *mapping =
-                                       iocb->ki_filp->f_mapping;
-                       /* will be released by direct_io_worker */
-                       inode_lock(inode);
-                       retval = filemap_write_and_wait_range(mapping, offset,
-                                                             end - 1);
-                       if (retval) {
-                               inode_unlock(inode);
-                               kmem_cache_free(dio_cache, dio);
-                               goto out;
-                       }
-               }
+       if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+               /* will be released by direct_io_worker */
+               inode_lock(inode);
        }
  
        /* Once we sampled i_size check for reads beyond EOF */
        dio->i_size = i_size_read(inode);
        if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-               if (dio->flags & DIO_LOCKING)
-                       inode_unlock(inode);
-               kmem_cache_free(dio_cache, dio);
                retval = 0;
-               goto out;
+               goto fail_dio;
+       }
+       if (align & blocksize_mask) {
+               if (bdev)
+                       blkbits = blksize_bits(bdev_logical_block_size(bdev));
+               blocksize_mask = (1 << blkbits) - 1;
+               if (align & blocksize_mask)
+                       goto fail_dio;
+       }
+       if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+               struct address_space *mapping = iocb->ki_filp->f_mapping;
+               retval = filemap_write_and_wait_range(mapping, offset, end - 1);
+               if (retval)
+                       goto fail_dio;
        }
  
        /*
                         */
                        retval = sb_init_dio_done_wq(dio->inode->i_sb);
                }
-               if (retval) {
-                       /*
-                        * We grab i_mutex only for reads so we don't have
-                        * to release it here
-                        */
-                       kmem_cache_free(dio_cache, dio);
-                       goto out;
-               }
+               if (retval)
+                       goto fail_dio;
        }
  
        /*
        } else
                BUG_ON(retval != -EIOCBQUEUED);
  
- out:
+       return retval;
+ fail_dio:
+       if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+               inode_unlock(inode);
+       kmem_cache_free(dio_cache, dio);
        return retval;
  }
  
This page took 0.060956 seconds and 4 git commands to generate.