Git Repo - linux.git/commitdiff
Merge branch 'for-4.13-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
authorLinus Torvalds <[email protected]>
Sat, 15 Jul 2017 05:55:52 +0000 (22:55 -0700)
committerLinus Torvalds <[email protected]>
Sat, 15 Jul 2017 05:55:52 +0000 (22:55 -0700)
Pull btrfs fixes from David Sterba:
 "We've identified and fixed a silent corruption (introduced by code in
  the first pull), a fixup after the blk_status_t merge and two fixes to
  incremental send that Filipe has been hunting for some time"

* 'for-4.13-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  Btrfs: fix unexpected return value of bio_readpage_error
  btrfs: btrfs_create_repair_bio never fails, skip error handling
  btrfs: cloned bios must not be iterated by bio_for_each_segment_all
  Btrfs: fix write corruption due to bio cloning on raid5/6
  Btrfs: incremental send, fix invalid memory access
  Btrfs: incremental send, fix invalid path for link commands

1  2 
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/inode.c
fs/btrfs/raid56.c

diff --combined fs/btrfs/compression.c
index 2c0b7b57fcd5525f826f40041da230b5fe212fcf,8ba1b86c9b725c8ff485bba5f5a20ffbdad3a531..d2ef9ac2a63038e279239012fdec7de0cf9573e3
@@@ -109,7 -109,7 +109,7 @@@ static void end_compressed_bio_read(str
        unsigned long index;
        int ret;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                cb->errors = 1;
  
        /* if there are more bios still pending for this compressed
@@@ -152,6 -152,7 +152,7 @@@ csum_failed
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
+               ASSERT(!bio_flagged(bio, BIO_CLONED));
                bio_for_each_segment_all(bvec, cb->orig_bio, i)
                        SetPageChecked(bvec->bv_page);
  
@@@ -219,7 -220,7 +220,7 @@@ static void end_compressed_bio_write(st
        struct page *page;
        unsigned long index;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                cb->errors = 1;
  
        /* if there are more bios still pending for this compressed
                                         cb->start,
                                         cb->start + cb->len - 1,
                                         NULL,
 -                                       bio->bi_error ? 0 : 1);
 +                                       bio->bi_status ? 0 : 1);
        cb->compressed_pages[0]->mapping = NULL;
  
        end_compressed_writeback(inode, cb);
@@@ -271,7 -272,7 +272,7 @@@ out
   * This also checksums the file bytes and gets things ready for
   * the end io hooks.
   */
 -int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 +blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
 -      int ret;
 +      blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
  
        WARN_ON(start & ((u64)PAGE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
 -              return -ENOMEM;
 +              return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 +              int submit = 0;
 +
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
 -                      ret = io_tree->ops->merge_bio_hook(page, 0,
 +                      submit = io_tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_SIZE,
                                                           bio, 0);
 -              else
 -                      ret = 0;
  
                page->mapping = NULL;
 -              if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
 +              if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(bio);
  
  
                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                        if (ret) {
 -                              bio->bi_error = ret;
 +                              bio->bi_status = ret;
                                bio_endio(bio);
                        }
  
  
        ret = btrfs_map_bio(fs_info, bio, 0, 1);
        if (ret) {
 -              bio->bi_error = ret;
 +              bio->bi_status = ret;
                bio_endio(bio);
        }
  
@@@ -515,7 -516,7 +516,7 @@@ next
   * After the compressed pages are read, we copy the bytes into the
   * bio we were passed and then call the bio end_io calls
   */
 -int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 +blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
  {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
 -      int ret = -ENOMEM;
 +      blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u32 *sums;
  
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
 -              return -EIO;
 +              return BLK_STS_IOERR;
  
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
                                                              __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
 -                      ret = -ENOMEM;
 +                      ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        refcount_set(&cb->pending_bios, 1);
  
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 +              int submit = 0;
 +
                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;
  
                if (comp_bio->bi_iter.bi_size)
 -                      ret = tree->ops->merge_bio_hook(page, 0,
 +                      submit = tree->ops->merge_bio_hook(page, 0,
                                                        PAGE_SIZE,
                                                        comp_bio, 0);
 -              else
 -                      ret = 0;
  
                page->mapping = NULL;
 -              if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 +              if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(comp_bio);
  
  
                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                        if (ret) {
 -                              comp_bio->bi_error = ret;
 +                              comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }
  
  
        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
        if (ret) {
 -              comp_bio->bi_error = ret;
 +              comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }
  
diff --combined fs/btrfs/disk-io.c
index 086dcbadce0971a43cd099fa6e0bbcd23bcc17e1,075beedb435213d45435bf3e6ea491d43e06fe68..080e2ebb8aa0137baef69edda45aa895bf8b7c7c
@@@ -87,7 -87,7 +87,7 @@@ struct btrfs_end_io_wq 
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
 -      int error;
 +      blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct btrfs_work work;
  };
@@@ -130,7 -130,7 +130,7 @@@ struct async_submit_bio 
         */
        u64 bio_offset;
        struct btrfs_work work;
 -      int error;
 +      blk_status_t status;
  };
  
  /*
@@@ -798,7 -798,7 +798,7 @@@ static void end_workqueue_bio(struct bi
        btrfs_work_func_t func;
  
        fs_info = end_io_wq->info;
 -      end_io_wq->error = bio->bi_error;
 +      end_io_wq->status = bio->bi_status;
  
        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
        btrfs_queue_work(wq, &end_io_wq->work);
  }
  
 -int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 +blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata)
  {
        struct btrfs_end_io_wq *end_io_wq;
  
        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
 -              return -ENOMEM;
 +              return BLK_STS_RESOURCE;
  
        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
 -      end_io_wq->error = 0;
 +      end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;
  
@@@ -867,14 -867,14 +867,14 @@@ unsigned long btrfs_async_submit_limit(
  static void run_one_async_start(struct btrfs_work *work)
  {
        struct async_submit_bio *async;
 -      int ret;
 +      blk_status_t ret;
  
        async = container_of(work, struct  async_submit_bio, work);
        ret = async->submit_bio_start(async->private_data, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
 -              async->error = ret;
 +              async->status = ret;
  }
  
  static void run_one_async_done(struct btrfs_work *work)
                wake_up(&fs_info->async_submit_wait);
  
        /* If an error occurred we just want to clean up the bio and move on */
 -      if (async->error) {
 -              async->bio->bi_error = async->error;
 +      if (async->status) {
 +              async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }
@@@ -915,17 -915,17 +915,17 @@@ static void run_one_async_free(struct b
        kfree(async);
  }
  
 -int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 -                      int mirror_num, unsigned long bio_flags,
 -                      u64 bio_offset, void *private_data,
 -                      extent_submit_bio_hook_t *submit_bio_start,
 -                      extent_submit_bio_hook_t *submit_bio_done)
 +blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 +                               int mirror_num, unsigned long bio_flags,
 +                               u64 bio_offset, void *private_data,
 +                               extent_submit_bio_hook_t *submit_bio_start,
 +                               extent_submit_bio_hook_t *submit_bio_done)
  {
        struct async_submit_bio *async;
  
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
 -              return -ENOMEM;
 +              return BLK_STS_RESOURCE;
  
        async->private_data = private_data;
        async->fs_info = fs_info;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;
  
 -      async->error = 0;
 +      async->status = 0;
  
        atomic_inc(&fs_info->nr_async_submits);
  
        return 0;
  }
  
 -static int btree_csum_one_bio(struct bio *bio)
 +static blk_status_t btree_csum_one_bio(struct bio *bio)
  {
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int i, ret = 0;
  
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                        break;
        }
  
 -      return ret;
 +      return errno_to_blk_status(ret);
  }
  
 -static int __btree_submit_bio_start(void *private_data, struct bio *bio,
 -                                  int mirror_num, unsigned long bio_flags,
 -                                  u64 bio_offset)
 +static blk_status_t __btree_submit_bio_start(void *private_data, struct bio *bio,
 +                                           int mirror_num, unsigned long bio_flags,
 +                                           u64 bio_offset)
  {
        /*
         * when we're called for a write, we're already in the async
        return btree_csum_one_bio(bio);
  }
  
 -static int __btree_submit_bio_done(void *private_data, struct bio *bio,
 -                               int mirror_num, unsigned long bio_flags,
 -                               u64 bio_offset)
 +static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
 +                                          int mirror_num, unsigned long bio_flags,
 +                                          u64 bio_offset)
  {
        struct inode *inode = private_data;
 -      int ret;
 +      blk_status_t ret;
  
        /*
         * when we're called for a write, we're already in the async
         */
        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
        if (ret) {
 -              bio->bi_error = ret;
 +              bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@@ -1015,14 -1016,14 +1016,14 @@@ static int check_async_write(unsigned l
        return 1;
  }
  
 -static int btree_submit_bio_hook(void *private_data, struct bio *bio,
 -                               int mirror_num, unsigned long bio_flags,
 -                               u64 bio_offset)
 +static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
 +                                        int mirror_num, unsigned long bio_flags,
 +                                        u64 bio_offset)
  {
        struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(bio_flags);
 -      int ret;
 +      blk_status_t ret;
  
        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
        return 0;
  
  out_w_error:
 -      bio->bi_error = ret;
 +      bio->bi_status = ret;
        bio_endio(bio);
        return ret;
  }
@@@ -1256,9 -1257,9 +1257,9 @@@ void clean_tree_block(struct btrfs_fs_i
                btrfs_assert_tree_locked(buf);
  
                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
 -                      __percpu_counter_add(&fs_info->dirty_metadata_bytes,
 -                                           -buf->len,
 -                                           fs_info->dirty_metadata_batch);
 +                      percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 +                                               -buf->len,
 +                                               fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
@@@ -1820,7 -1821,7 +1821,7 @@@ static void end_workqueue_fn(struct btr
        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
        bio = end_io_wq->bio;
  
 -      bio->bi_error = end_io_wq->error;
 +      bio->bi_status = end_io_wq->status;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@@@ -3510,7 -3511,7 +3511,7 @@@ static void write_dev_flush(struct btrf
  /*
   * If the flush bio has been submitted by write_dev_flush, wait for it.
   */
 -static int wait_dev_flush(struct btrfs_device *device)
 +static blk_status_t wait_dev_flush(struct btrfs_device *device)
  {
        struct bio *bio = device->flush_bio;
  
        device->flush_bio_sent = 0;
        wait_for_completion_io(&device->flush_wait);
  
 -      return bio->bi_error;
 +      return bio->bi_status;
  }
  
  static int check_barrier_error(struct btrfs_fs_devices *fsdevs)
@@@ -3549,7 -3550,7 +3550,7 @@@ static int barrier_all_devices(struct b
        struct list_head *head;
        struct btrfs_device *dev;
        int errors_wait = 0;
 -      int ret;
 +      blk_status_t ret;
  
        /* send down all the barriers */
        head = &info->fs_devices->devices;
@@@ -4047,9 -4048,9 +4048,9 @@@ void btrfs_mark_buffer_dirty(struct ext
                        buf->start, transid, fs_info->generation);
        was_dirty = set_extent_buffer_dirty(buf);
        if (!was_dirty)
 -              __percpu_counter_add(&fs_info->dirty_metadata_bytes,
 -                                   buf->len,
 -                                   fs_info->dirty_metadata_batch);
 +              percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 +                                       buf->len,
 +                                       fs_info->dirty_metadata_batch);
  #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
                btrfs_print_leaf(fs_info, buf);
diff --combined fs/btrfs/extent_io.c
index 556484cf5d9338d3383a23de7d484df2b5348d73,eb484a0d1320c7385c66019e7259f32c34869d04..0aff9b278c1990f55feb2693a9fff65d5bf693ed
@@@ -164,8 -164,7 +164,8 @@@ int __init extent_io_init(void
                goto free_state_cache;
  
        btrfs_bioset = bioset_create(BIO_POOL_SIZE,
 -                                   offsetof(struct btrfs_io_bio, bio));
 +                                   offsetof(struct btrfs_io_bio, bio),
 +                                   BIOSET_NEED_BVECS);
        if (!btrfs_bioset)
                goto free_buffer_cache;
  
@@@ -2258,7 -2257,7 +2258,7 @@@ int btrfs_get_io_failure_record(struct 
        return 0;
  }
  
- int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+ bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
                           struct io_failure_record *failrec, int failed_mirror)
  {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
                btrfs_debug(fs_info,
                        "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
                        num_copies, failrec->this_mirror, failed_mirror);
-               return 0;
+               return false;
        }
  
        /*
                btrfs_debug(fs_info,
                        "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
                        num_copies, failrec->this_mirror, failed_mirror);
-               return 0;
+               return false;
        }
  
-       return 1;
+       return true;
  }
  
  
@@@ -2373,7 -2372,6 +2373,7 @@@ static int bio_readpage_error(struct bi
        struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
        struct bio *bio;
        int read_mode = 0;
 +      blk_status_t status;
        int ret;
  
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
        if (ret)
                return ret;
  
-       ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
-       if (!ret) {
+       if (!btrfs_check_repairable(inode, failed_bio, failrec,
+                                   failed_mirror)) {
                free_io_failure(failure_tree, tree, failrec);
                return -EIO;
        }
                                      start - page_offset(page),
                                      (int)phy_offset, failed_bio->bi_end_io,
                                      NULL);
-       if (!bio) {
-               free_io_failure(failure_tree, tree, failrec);
-               return -EIO;
-       }
        bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
  
        btrfs_debug(btrfs_sb(inode->i_sb),
                "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
                read_mode, failrec->this_mirror, failrec->in_validation);
  
 -      ret = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
 +      status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
                                         failrec->bio_flags, 0);
 -      if (ret) {
 +      if (status) {
                free_io_failure(failure_tree, tree, failrec);
                bio_put(bio);
 +              ret = blk_status_to_errno(status);
        }
  
        return ret;
@@@ -2450,12 -2443,12 +2446,13 @@@ void end_extent_writepage(struct page *
   */
  static void end_bio_extent_writepage(struct bio *bio)
  {
 +      int error = blk_status_to_errno(bio->bi_status);
        struct bio_vec *bvec;
        u64 start;
        u64 end;
        int i;
  
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct inode *inode = page->mapping->host;
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
  
 -              end_extent_writepage(page, bio->bi_error, start, end);
 +              end_extent_writepage(page, error, start, end);
                end_page_writeback(page);
        }
  
@@@ -2513,7 -2506,7 +2510,7 @@@ endio_readpage_release_extent(struct ex
  static void end_bio_extent_readpage(struct bio *bio)
  {
        struct bio_vec *bvec;
 -      int uptodate = !bio->bi_error;
 +      int uptodate = !bio->bi_status;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree, *failure_tree;
        u64 offset = 0;
        int ret;
        int i;
  
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
                struct inode *inode = page->mapping->host;
  
                btrfs_debug(fs_info,
                        "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
 -                      (u64)bio->bi_iter.bi_sector, bio->bi_error,
 +                      (u64)bio->bi_iter.bi_sector, bio->bi_status,
                        io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
                failure_tree = &BTRFS_I(inode)->io_failure_tree;
                                ret = bio_readpage_error(bio, offset, page,
                                                         start, end, mirror);
                                if (ret == 0) {
 -                                      uptodate = !bio->bi_error;
 +                                      uptodate = !bio->bi_status;
                                        offset += len;
                                        continue;
                                }
@@@ -2653,7 -2647,7 +2651,7 @@@ readpage_ok
                endio_readpage_release_extent(tree, extent_start, extent_len,
                                              uptodate);
        if (io_bio->end_io)
 -              io_bio->end_io(io_bio, bio->bi_error);
 +              io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
        bio_put(bio);
  }
  
@@@ -2726,7 -2720,7 +2724,7 @@@ struct bio *btrfs_bio_clone_partial(str
  static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                                       unsigned long bio_flags)
  {
 -      int ret = 0;
 +      blk_status_t ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;
        struct extent_io_tree *tree = bio->bi_private;
                btrfsic_submit_bio(bio);
  
        bio_put(bio);
 -      return ret;
 +      return blk_status_to_errno(ret);
  }
  
  static int merge_bio(struct extent_io_tree *tree, struct page *page,
@@@ -2805,7 -2799,6 +2803,7 @@@ static int submit_extent_page(int op, i
        bio_add_page(bio, page, page_size, offset);
        bio->bi_end_io = end_io_func;
        bio->bi_private = tree;
 +      bio->bi_write_hint = page->mapping->host->i_write_hint;
        bio_set_op_attrs(bio, op, op_flags);
        if (wbc) {
                wbc_init_bio(wbc, bio);
@@@ -3577,9 -3570,9 +3575,9 @@@ lock_extent_buffer_for_io(struct extent
                set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
                spin_unlock(&eb->refs_lock);
                btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
 -              __percpu_counter_add(&fs_info->dirty_metadata_bytes,
 -                                   -eb->len,
 -                                   fs_info->dirty_metadata_batch);
 +              percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 +                                       -eb->len,
 +                                       fs_info->dirty_metadata_batch);
                ret = 1;
        } else {
                spin_unlock(&eb->refs_lock);
@@@ -3680,6 -3673,7 +3678,7 @@@ static void end_bio_extent_buffer_write
        struct extent_buffer *eb;
        int i, done;
  
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
  
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
  
 -              if (bio->bi_error ||
 +              if (bio->bi_status ||
                    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
                        ClearPageUptodate(page);
                        set_btree_ioerr(page);
diff --combined fs/btrfs/extent_io.h
index 3fb8513bf02e27d97eed1f1529aa456b919eeca6,cfdbb9efaaed4fee211881ee9626014bc2615610..4f030912f3efe784cfec3117a4459c07d77518c8
@@@ -92,7 -92,7 +92,7 @@@ struct btrfs_inode
  struct btrfs_io_bio;
  struct io_failure_record;
  
 -typedef       int (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
 +typedef       blk_status_t (extent_submit_bio_hook_t)(void *private_data, struct bio *bio,
                                       int mirror_num, unsigned long bio_flags,
                                       u64 bio_offset);
  struct extent_io_ops {
@@@ -539,8 -539,8 +539,8 @@@ void btrfs_free_io_failure_record(struc
                u64 end);
  int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
                                struct io_failure_record **failrec_ret);
- int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
-                          struct io_failure_record *failrec, int fail_mirror);
+ bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+                           struct io_failure_record *failrec, int fail_mirror);
  struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
                                    struct io_failure_record *failrec,
                                    struct page *page, int pg_offset, int icsum,
diff --combined fs/btrfs/inode.c
index 06dea7c89bbde5866664294a8d639b13d6dcf7bb,eb495e956d53ef80758b3c4331e98918be5367e7..95c212037095fea727f4a35ea19d9d7e54ad8cfe
@@@ -836,12 -836,13 +836,12 @@@ retry
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
 -              ret = btrfs_submit_compressed_write(inode,
 +              if (btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
 -                                  async_extent->nr_pages);
 -              if (ret) {
 +                                  async_extent->nr_pages)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@@ -1763,8 -1764,8 +1763,8 @@@ static void btrfs_set_bit_hook(void *pr
                if (btrfs_is_testing(fs_info))
                        return;
  
 -              __percpu_counter_add(&fs_info->delalloc_bytes, len,
 -                                   fs_info->delalloc_batch);
 +              percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
 +                                       fs_info->delalloc_batch);
                spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->delalloc_bytes += len;
                if (*bits & EXTENT_DEFRAG)
@@@ -1838,8 -1839,8 +1838,8 @@@ static void btrfs_clear_bit_hook(void *
                                        &inode->vfs_inode,
                                        state->start, len);
  
 -              __percpu_counter_add(&fs_info->delalloc_bytes, -len,
 -                                   fs_info->delalloc_batch);
 +              percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
 +                                       fs_info->delalloc_batch);
                spin_lock(&inode->lock);
                inode->delalloc_bytes -= len;
                if (do_list && inode->delalloc_bytes == 0 &&
@@@ -1899,12 -1900,12 +1899,12 @@@ int btrfs_merge_bio_hook(struct page *p
   * At IO completion time the cums attached on the ordered extent record
   * are inserted into the btree
   */
 -static int __btrfs_submit_bio_start(void *private_data, struct bio *bio,
 +static blk_status_t __btrfs_submit_bio_start(void *private_data, struct bio *bio,
                                    int mirror_num, unsigned long bio_flags,
                                    u64 bio_offset)
  {
        struct inode *inode = private_data;
 -      int ret = 0;
 +      blk_status_t ret = 0;
  
        ret = btrfs_csum_one_bio(inode, bio, 0, 0);
        BUG_ON(ret); /* -ENOMEM */
   * At IO completion time the cums attached on the ordered extent record
   * are inserted into the btree
   */
 -static int __btrfs_submit_bio_done(void *private_data, struct bio *bio,
 +static blk_status_t __btrfs_submit_bio_done(void *private_data, struct bio *bio,
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
  {
        struct inode *inode = private_data;
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 -      int ret;
 +      blk_status_t ret;
  
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
        if (ret) {
 -              bio->bi_error = ret;
 +              bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
   * extent_io.c submission hook. This does the right thing for csum calculation
   * on write, or reading the csums from the tree before a read
   */
 -static int btrfs_submit_bio_hook(void *private_data, struct bio *bio,
 +static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
  {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
 -      int ret = 0;
 +      blk_status_t ret = 0;
        int skip_sum;
        int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
  
@@@ -1992,8 -1993,8 +1992,8 @@@ mapit
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
  
  out:
 -      if (ret < 0) {
 -              bio->bi_error = ret;
 +      if (ret) {
 +              bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@@ -8016,10 -8017,6 +8016,6 @@@ static int dio_read_error(struct inode 
        isector >>= inode->i_sb->s_blocksize_bits;
        bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
                                pgoff, isector, repair_endio, repair_arg);
-       if (!bio) {
-               free_io_failure(failure_tree, io_tree, failrec);
-               return -EIO;
-       }
        bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
  
        btrfs_debug(BTRFS_I(inode)->root->fs_info,
@@@ -8050,7 -8047,7 +8046,7 @@@ static void btrfs_retry_endio_nocsum(st
        struct extent_io_tree *io_tree, *failure_tree;
        int i;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                goto end;
  
        ASSERT(bio->bi_vcnt == 1);
        ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
  
        done->uptodate = 1;
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i)
                clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
                                 io_tree, done->start, bvec->bv_page,
@@@ -8139,7 -8137,7 +8136,7 @@@ static void btrfs_retry_endio(struct bi
        int ret;
        int i;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                goto end;
  
        uptodate = 1;
        io_tree = &BTRFS_I(inode)->io_tree;
        failure_tree = &BTRFS_I(inode)->io_failure_tree;
  
+       ASSERT(!bio_flagged(bio, BIO_CLONED));
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
                                             bvec->bv_offset, done->start,
@@@ -8170,8 -8169,8 +8168,8 @@@ end
        bio_put(bio);
  }
  
 -static int __btrfs_subio_endio_read(struct inode *inode,
 -                                  struct btrfs_io_bio *io_bio, int err)
 +static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
 +              struct btrfs_io_bio *io_bio, blk_status_t err)
  {
        struct btrfs_fs_info *fs_info;
        struct bio_vec bvec;
@@@ -8216,7 -8215,7 +8214,7 @@@ try_again
                                io_bio->mirror_num,
                                btrfs_retry_endio, &done);
                if (ret) {
 -                      err = ret;
 +                      err = errno_to_blk_status(ret);
                        goto next;
                }
  
@@@ -8243,8 -8242,8 +8241,8 @@@ next
        return err;
  }
  
 -static int btrfs_subio_endio_read(struct inode *inode,
 -                                struct btrfs_io_bio *io_bio, int err)
 +static blk_status_t btrfs_subio_endio_read(struct inode *inode,
 +              struct btrfs_io_bio *io_bio, blk_status_t err)
  {
        bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
  
@@@ -8264,12 -8263,12 +8262,12 @@@ static void btrfs_endio_direct_read(str
        struct inode *inode = dip->inode;
        struct bio *dio_bio;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 -      int err = bio->bi_error;
 +      blk_status_t err = bio->bi_status;
  
        if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) {
                err = btrfs_subio_endio_read(inode, io_bio, err);
                if (!err)
 -                      bio->bi_error = 0;
 +                      bio->bi_status = 0;
        }
  
        unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
  
        kfree(dip);
  
 -      dio_bio->bi_error = bio->bi_error;
 -      dio_end_io(dio_bio, bio->bi_error);
 +      dio_bio->bi_status = bio->bi_status;
 +      dio_end_io(dio_bio);
  
        if (io_bio->end_io)
 -              io_bio->end_io(io_bio, err);
 +              io_bio->end_io(io_bio, blk_status_to_errno(err));
        bio_put(bio);
  }
  
@@@ -8334,21 -8333,21 +8332,21 @@@ static void btrfs_endio_direct_write(st
        struct bio *dio_bio = dip->dio_bio;
  
        __endio_write_update_ordered(dip->inode, dip->logical_offset,
 -                                   dip->bytes, !bio->bi_error);
 +                                   dip->bytes, !bio->bi_status);
  
        kfree(dip);
  
 -      dio_bio->bi_error = bio->bi_error;
 -      dio_end_io(dio_bio, bio->bi_error);
 +      dio_bio->bi_status = bio->bi_status;
 +      dio_end_io(dio_bio);
        bio_put(bio);
  }
  
 -static int __btrfs_submit_bio_start_direct_io(void *private_data,
 +static blk_status_t __btrfs_submit_bio_start_direct_io(void *private_data,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags, u64 offset)
  {
        struct inode *inode = private_data;
 -      int ret;
 +      blk_status_t ret;
        ret = btrfs_csum_one_bio(inode, bio, offset, 1);
        BUG_ON(ret); /* -ENOMEM */
        return 0;
  static void btrfs_end_dio_bio(struct bio *bio)
  {
        struct btrfs_dio_private *dip = bio->bi_private;
 -      int err = bio->bi_error;
 +      blk_status_t err = bio->bi_status;
  
        if (err)
                btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
        if (dip->errors) {
                bio_io_error(dip->orig_bio);
        } else {
 -              dip->dio_bio->bi_error = 0;
 +              dip->dio_bio->bi_status = 0;
                bio_endio(dip->orig_bio);
        }
  out:
        bio_put(bio);
  }
  
 -static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
 +static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
                                                 struct btrfs_dio_private *dip,
                                                 struct bio *bio,
                                                 u64 file_offset)
  {
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
 -      int ret;
 +      blk_status_t ret;
  
        /*
         * We load all the csum data we need when we submit
@@@ -8432,7 -8431,7 +8430,7 @@@ static inline int __btrfs_submit_dio_bi
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_dio_private *dip = bio->bi_private;
        bool write = bio_op(bio) == REQ_OP_WRITE;
 -      int ret;
 +      blk_status_t ret;
  
        if (async_submit)
                async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
@@@ -8669,12 -8668,12 +8667,12 @@@ free_ordered
                        unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
                              file_offset + dio_bio->bi_iter.bi_size - 1);
  
 -              dio_bio->bi_error = -EIO;
 +              dio_bio->bi_status = BLK_STS_IOERR;
                /*
                 * Releases and cleans up our dio_bio, no need to bio_put()
                 * nor bio_endio()/bio_io_error() against dio_bio.
                 */
 -              dio_end_io(dio_bio, ret);
 +              dio_end_io(dio_bio);
        }
        if (bio)
                bio_put(bio);
@@@ -8757,9 -8756,6 +8755,9 @@@ static ssize_t btrfs_direct_IO(struct k
                        dio_data.overwrite = 1;
                        inode_unlock(inode);
                        relock = true;
 +              } else if (iocb->ki_flags & IOCB_NOWAIT) {
 +                      ret = -EAGAIN;
 +                      goto out;
                }
                ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                                                   offset, count);
diff --combined fs/btrfs/raid56.c
index 6f845d219cd6d0c6d8a8dfc26019ca6c7be122af,b89d070036979dd97867dfa0c2e4fbedd070ebd8..208638384cd2abfb1206b2f5927b5763a6330283
@@@ -868,7 -868,7 +868,7 @@@ static void free_raid_bio(struct btrfs_
   * this frees the rbio and runs through all the bios in the
   * bio_list and calls end_io on them
   */
 -static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 +static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
  {
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *next;
        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
 -              cur->bi_error = err;
 +              cur->bi_status = err;
                bio_endio(cur);
                cur = next;
        }
  static void raid_write_end_io(struct bio *bio)
  {
        struct btrfs_raid_bio *rbio = bio->bi_private;
 -      int err = bio->bi_error;
 +      blk_status_t err = bio->bi_status;
        int max_errors;
  
        if (err)
        max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
                     0 : rbio->bbio->max_errors;
        if (atomic_read(&rbio->error) > max_errors)
 -              err = -EIO;
 +              err = BLK_STS_IOERR;
  
        rbio_orig_end_io(rbio, err);
  }
@@@ -1089,7 -1089,7 +1089,7 @@@ static int rbio_add_io_page(struct btrf
                 * devices or if they are not contiguous
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
 -                  !last->bi_error &&
 +                  !last->bi_status &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
                        if (ret == PAGE_SIZE)
@@@ -1136,20 -1136,27 +1136,27 @@@ static void validate_rbio_for_rmw(struc
  static void index_rbio_pages(struct btrfs_raid_bio *rbio)
  {
        struct bio *bio;
-       struct bio_vec *bvec;
        u64 start;
        unsigned long stripe_offset;
        unsigned long page_index;
-       int i;
  
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
+               struct bio_vec bvec;
+               struct bvec_iter iter;
+               int i = 0;
                start = (u64)bio->bi_iter.bi_sector << 9;
                stripe_offset = start - rbio->bbio->raid_map[0];
                page_index = stripe_offset >> PAGE_SHIFT;
  
-               bio_for_each_segment_all(bvec, bio, i)
-                       rbio->bio_pages[page_index + i] = bvec->bv_page;
+               if (bio_flagged(bio, BIO_CLONED))
+                       bio->bi_iter = btrfs_io_bio(bio)->iter;
+               bio_for_each_segment(bvec, bio, iter) {
+                       rbio->bio_pages[page_index + i] = bvec.bv_page;
+                       i++;
+               }
        }
        spin_unlock_irq(&rbio->bio_list_lock);
  }
@@@ -1423,11 -1430,14 +1430,14 @@@ static int fail_bio_stripe(struct btrfs
   */
  static void set_bio_pages_uptodate(struct bio *bio)
  {
-       struct bio_vec *bvec;
-       int i;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       if (bio_flagged(bio, BIO_CLONED))
+               bio->bi_iter = btrfs_io_bio(bio)->iter;
  
-       bio_for_each_segment_all(bvec, bio, i)
-               SetPageUptodate(bvec->bv_page);
+       bio_for_each_segment(bvec, bio, iter)
+               SetPageUptodate(bvec.bv_page);
  }
  
  /*
@@@ -1442,7 -1452,7 +1452,7 @@@ static void raid_rmw_end_io(struct bio 
  {
        struct btrfs_raid_bio *rbio = bio->bi_private;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@@ -1985,7 -1995,7 +1995,7 @@@ static void raid_recover_end_io(struct 
         * we only read stripe pages off the disk, set them
         * up to date if there were no errors
         */
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@@ -2524,7 -2534,7 +2534,7 @@@ static void raid56_parity_scrub_end_io(
  {
        struct btrfs_raid_bio *rbio = bio->bi_private;
  
 -      if (bio->bi_error)
 +      if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
This page took 0.159853 seconds and 4 git commands to generate.