Git Repo - linux.git/commitdiff
Merge uncontroversial parts of branch 'readlink' of git://git.kernel.org/pub/scm...
author Linus Torvalds <[email protected]>
Sun, 18 Dec 2016 03:16:12 +0000 (19:16 -0800)
committer Linus Torvalds <[email protected]>
Sun, 18 Dec 2016 03:16:12 +0000 (19:16 -0800)
Pull partial readlink cleanups from Miklos Szeredi.

This is the uncontroversial part of the readlink cleanup patch-set that
simplifies the default readlink handling.

Miklos and Al are still discussing the rest of the series.

* git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs:
  vfs: make generic_readlink() static
  vfs: remove ".readlink = generic_readlink" assignments
  vfs: default to generic_readlink()
  vfs: replace calling i_op->readlink with vfs_readlink()
  proc/self: use generic_readlink
  ecryptfs: use vfs_get_link()
  bad_inode: add missing i_op initializers
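
As a hedged illustration of what "vfs: default to generic_readlink()" and "vfs: remove ".readlink = generic_readlink" assignments" buy a filesystem: a symlink whose body is cached in inode->i_link only needs a ->get_link method, and readlink(2) is then served by the VFS default. The foo_* names below are made up for this sketch; it is not code from the series.

/* Hypothetical example only; foo_* names are invented for this sketch. */
#include <linux/fs.h>

static const char *foo_get_link(struct dentry *dentry, struct inode *inode,
				struct delayed_call *done)
{
	/* Symlink body is cached in ->i_link, so RCU-mode lookups are fine. */
	return inode->i_link;
}

const struct inode_operations foo_symlink_inode_operations = {
	.get_link	= foo_get_link,
	/* No ".readlink = generic_readlink" line is needed after this series. */
};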

16 files changed:
Documentation/filesystems/vfs.txt
fs/btrfs/inode.c
fs/ceph/inode.c
fs/f2fs/namei.c
fs/fuse/dir.c
fs/libfs.c
fs/namei.c
fs/nfsd/nfs4xdr.c
fs/nfsd/vfs.c
fs/overlayfs/inode.c
fs/proc/inode.c
fs/ubifs/file.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
include/linux/fs.h
mm/shmem.c

diff --combined Documentation/filesystems/vfs.txt
index 3893f4d44cd46e0c7b9579fad48638a0fe359960,038241123ca5a6a4cc32e3cc3eac3c35af111eff..b968084eeac14bbc4f8f51dbf7fc3484f61da496
@@@ -451,9 -451,6 +451,6 @@@ otherwise noted
        exist; this is checked by the VFS.  Unlike plain rename,
        source and target may be of different type.
  
-   readlink: called by the readlink(2) system call. Only required if
-       you want to support reading symbolic links
    get_link: called by the VFS to follow a symbolic link to the
        inode it points to.  Only required if you want to support
        symbolic links.  This method returns the symlink body
        to traverse (and possibly resets the current position with
        nd_jump_link()).  If the body won't go away until the inode
        is gone, nothing else is needed; if it needs to be otherwise
        pinned, arrange for its release by having get_link(..., ..., done)
        do set_delayed_call(done, destructor, argument).
        In that case destructor(argument) will be called once VFS is
        done with the body you've returned.
        May be called in RCU mode; that is indicated by NULL dentry
        argument.  If request can't be handled without leaving RCU mode,
        have it return ERR_PTR(-ECHILD).
  
+   readlink: this is now just an override for use by readlink(2) for the
+       cases when ->get_link uses nd_jump_link() or object is not in
+       fact a symlink.  Normally filesystems should only implement
+       ->get_link for symlinks and readlink(2) will automatically use
+       that.
    permission: called by the VFS to check for access rights on a POSIX-like
        filesystem.
  
@@@ -948,7 -951,7 +951,7 @@@ struct dentry_operations 
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *, char *, int);
        struct vfsmount *(*d_automount)(struct path *);
 -      int (*d_manage)(struct dentry *, bool);
 +      int (*d_manage)(const struct path *, bool);
        struct dentry *(*d_real)(struct dentry *, const struct inode *,
                                 unsigned int);
  };
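
As a hedged illustration of the readlink note in the vfs.txt hunk above: after this series the only reason left to implement ->readlink is an object that is not really a symlink (or whose ->get_link relies on nd_jump_link()) yet should still answer readlink(2). The sketch below uses made-up foo_* names and assumes readlink_copy() as the helper for copying a string to the user buffer; it is not code from this merge.

/* Hypothetical sketch; not code from this merge. */
#include <linux/fs.h>

static int foo_magic_readlink(struct dentry *dentry, char __user *buffer,
			      int buflen)
{
	/*
	 * The target string would normally be derived from the object's
	 * state; the object is not a plain symlink, so the VFS default
	 * (which relies on ->get_link) does not apply here.
	 */
	return readlink_copy(buffer, buflen, "magic-target");
}

const struct inode_operations foo_magic_inode_operations = {
	.readlink	= foo_magic_readlink,
};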
diff --combined fs/btrfs/inode.c
index c3b6ffa8e39d272981c0f3fe8134e97a93f0b360,d9c0eb7c16d4fd037fded8efe5c6dde798734142..f2b281ad7af6b9db26b48c6d4f072a850c19d58a
@@@ -30,6 -30,7 +30,6 @@@
  #include <linux/mpage.h>
  #include <linux/swap.h>
  #include <linux/writeback.h>
 -#include <linux/statfs.h>
  #include <linux/compat.h>
  #include <linux/bit_spinlock.h>
  #include <linux/xattr.h>
@@@ -249,12 -250,11 +249,12 @@@ static noinline int cow_file_range_inli
                                          int compress_type,
                                          struct page **compressed_pages)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
 -      u64 aligned_end = ALIGN(end, root->sectorsize);
 +      u64 aligned_end = ALIGN(end, fs_info->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
                data_len = compressed_size;
  
        if (start > 0 ||
 -          actual_end > root->sectorsize ||
 -          data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
 +          actual_end > fs_info->sectorsize ||
 +          data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
            (!compressed_size &&
 -          (actual_end & (root->sectorsize - 1)) == 0) ||
 +          (actual_end & (fs_info->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
 -          data_len > root->fs_info->max_inline) {
 +          data_len > fs_info->max_inline) {
                return 1;
        }
  
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
 -      trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 +      trans->block_rsv = &fs_info->delalloc_block_rsv;
  
        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
@@@ -326,7 -326,7 +326,7 @@@ out
         */
        btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
        btrfs_free_path(path);
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        return ret;
  }
  
@@@ -373,15 -373,15 +373,15 @@@ static noinline int add_async_extent(st
  
  static inline int inode_need_compress(struct inode *inode)
  {
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
  
        /* force compress */
 -      if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
 +      if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
 -      if (btrfs_test_opt(root->fs_info, COMPRESS) ||
 +      if (btrfs_test_opt(fs_info, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
@@@ -411,10 -411,9 +411,10 @@@ static noinline void compress_file_rang
                                        struct async_cow *async_cow,
                                        int *num_added)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
 -      u64 blocksize = root->sectorsize;
 +      u64 blocksize = fs_info->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        unsigned long max_uncompressed = SZ_128K;
        int i;
        int will_compress;
 -      int compress_type = root->fs_info->compress_type;
 +      int compress_type = fs_info->compress_type;
        int redirty = 0;
  
        /* if this is a small write inside eof, kick off a defrag */
@@@ -626,7 -625,7 +626,7 @@@ cont
                nr_pages_ret = 0;
  
                /* flag the file so we don't compress in the future */
 -              if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
 +              if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
@@@ -684,7 -683,6 +684,7 @@@ static void free_async_extent_pages(str
  static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
@@@ -797,7 -795,7 +797,7 @@@ retry
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
 -              em->bdev = root->fs_info->fs_devices->latest_bdev;
 +              em->bdev = fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }
 -              btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 +              btrfs_dec_block_group_reservations(fs_info, ins.objectid);
  
                /*
                 * clear dirty, set writeback and unlock the pages.
        }
        return;
  out_free_reserve:
 -      btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 -      btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 +      btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 +      btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
  out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
@@@ -942,14 -940,13 +942,14 @@@ static noinline int cow_file_range(stru
                                   int *page_started, unsigned long *nr_written,
                                   int unlock, struct btrfs_dedupe_hash *hash)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
 -      u64 blocksize = root->sectorsize;
 +      u64 blocksize = fs_info->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        }
  
        BUG_ON(disk_num_bytes >
 -             btrfs_super_total_bytes(root->fs_info->super_copy));
 +             btrfs_super_total_bytes(fs_info->super_copy));
  
        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
  
                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
 -                                         root->sectorsize, 0, alloc_hint,
 +                                         fs_info->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
 -              em->bdev = root->fs_info->fs_devices->latest_bdev;
 +              em->bdev = fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;
  
                                goto out_drop_extent_cache;
                }
  
 -              btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 +              btrfs_dec_block_group_reservations(fs_info, ins.objectid);
  
                if (disk_num_bytes < cur_alloc_size)
                        break;
@@@ -1087,8 -1084,8 +1087,8 @@@ out
  out_drop_extent_cache:
        btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
  out_reserve:
 -      btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 -      btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 +      btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 +      btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
  out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
                                     locked_page,
@@@ -1122,7 -1119,6 +1122,7 @@@ static noinline void async_cow_start(st
   */
  static noinline void async_cow_submit(struct btrfs_work *work)
  {
 +      struct btrfs_fs_info *fs_info;
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;
        async_cow = container_of(work, struct async_cow, work);
  
        root = async_cow->root;
 +      fs_info = root->fs_info;
        nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
                PAGE_SHIFT;
  
        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
 -      if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
 +      if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
            5 * SZ_1M &&
 -          waitqueue_active(&root->fs_info->async_submit_wait))
 -              wake_up(&root->fs_info->async_submit_wait);
 +          waitqueue_active(&fs_info->async_submit_wait))
 +              wake_up(&fs_info->async_submit_wait);
  
        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
@@@ -1159,7 -1154,6 +1159,7 @@@ static int cow_file_range_async(struct 
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
                async_cow->start = start;
  
                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
 -                  !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
 +                  !btrfs_test_opt(fs_info, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + SZ_512K - 1);
  
                nr_pages = (cur_end - start + PAGE_SIZE) >>
                        PAGE_SHIFT;
 -              atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
 +              atomic_add(nr_pages, &fs_info->async_delalloc_pages);
  
 -              btrfs_queue_work(root->fs_info->delalloc_workers,
 -                               &async_cow->work);
 +              btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
  
 -              if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
 -                      wait_event(root->fs_info->async_submit_wait,
 -                         (atomic_read(&root->fs_info->async_delalloc_pages) <
 -                          limit));
 +              if (atomic_read(&fs_info->async_delalloc_pages) > limit) {
 +                      wait_event(fs_info->async_submit_wait,
 +                                 (atomic_read(&fs_info->async_delalloc_pages) <
 +                                  limit));
                }
  
 -              while (atomic_read(&root->fs_info->async_submit_draining) &&
 -                    atomic_read(&root->fs_info->async_delalloc_pages)) {
 -                      wait_event(root->fs_info->async_submit_wait,
 -                        (atomic_read(&root->fs_info->async_delalloc_pages) ==
 -                         0));
 +              while (atomic_read(&fs_info->async_submit_draining) &&
 +                     atomic_read(&fs_info->async_delalloc_pages)) {
 +                      wait_event(fs_info->async_submit_wait,
 +                                 (atomic_read(&fs_info->async_delalloc_pages) ==
 +                                  0));
                }
  
                *nr_written += nr_pages;
        return 0;
  }
  
 -static noinline int csum_exist_in_range(struct btrfs_root *root,
 +static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
                                        u64 bytenr, u64 num_bytes)
  {
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);
  
 -      ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
 +      ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;
@@@ -1248,7 -1243,6 +1248,7 @@@ static noinline int run_delalloc_nocow(
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
                return PTR_ERR(trans);
        }
  
 -      trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 +      trans->block_rsv = &fs_info->delalloc_block_rsv;
  
        cow_start = (u64)-1;
        cur_offset = start;
@@@ -1380,7 -1374,7 +1380,7 @@@ next_slot
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
 -                      if (btrfs_extent_readonly(root, disk_bytenr))
 +                      if (btrfs_extent_readonly(fs_info, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, ino,
                                                  found_key.offset -
                         * this ensure that csum for a given extent are
                         * either valid or do not exist.
                         */
 -                      if (csum_exist_in_range(root, disk_bytenr, num_bytes))
 +                      if (csum_exist_in_range(fs_info, disk_bytenr,
 +                                              num_bytes))
                                goto out_check;
 -                      if (!btrfs_inc_nocow_writers(root->fs_info,
 -                                                   disk_bytenr))
 +                      if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
                                goto out_check;
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
                                btrfs_file_extent_inline_len(leaf,
                                                     path->slots[0], fi);
 -                      extent_end = ALIGN(extent_end, root->sectorsize);
 +                      extent_end = ALIGN(extent_end,
 +                                         fs_info->sectorsize);
                } else {
                        BUG_ON(1);
                }
@@@ -1424,7 -1417,8 +1424,7 @@@ out_check
                        if (!nolock && nocow)
                                btrfs_end_write_no_snapshoting(root);
                        if (nocow)
 -                              btrfs_dec_nocow_writers(root->fs_info,
 -                                                      disk_bytenr);
 +                              btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                        goto next_slot;
                }
                if (!nocow) {
                                if (!nolock && nocow)
                                        btrfs_end_write_no_snapshoting(root);
                                if (nocow)
 -                                      btrfs_dec_nocow_writers(root->fs_info,
 +                                      btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
                                goto error;
                        }
                        em->block_start = disk_bytenr;
                        em->orig_block_len = disk_num_bytes;
                        em->ram_bytes = ram_bytes;
 -                      em->bdev = root->fs_info->fs_devices->latest_bdev;
 +                      em->bdev = fs_info->fs_devices->latest_bdev;
                        em->mod_start = em->start;
                        em->mod_len = em->len;
                        set_bit(EXTENT_FLAG_PINNED, &em->flags);
                ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
                                               num_bytes, num_bytes, type);
                if (nocow)
 -                      btrfs_dec_nocow_writers(root->fs_info, disk_bytenr);
 +                      btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                BUG_ON(ret); /* -ENOMEM */
  
                if (root->root_key.objectid ==
        }
  
  error:
 -      err = btrfs_end_transaction(trans, root);
 +      err = btrfs_end_transaction(trans);
        if (!ret)
                ret = err;
  
@@@ -1699,8 -1693,6 +1699,8 @@@ static void btrfs_merge_extent_hook(str
  static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
                                      struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 +
        spin_lock(&root->delalloc_lock);
        if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
                        &BTRFS_I(inode)->runtime_flags);
                root->nr_delalloc_inodes++;
                if (root->nr_delalloc_inodes == 1) {
 -                      spin_lock(&root->fs_info->delalloc_root_lock);
 +                      spin_lock(&fs_info->delalloc_root_lock);
                        BUG_ON(!list_empty(&root->delalloc_root));
                        list_add_tail(&root->delalloc_root,
 -                                    &root->fs_info->delalloc_roots);
 -                      spin_unlock(&root->fs_info->delalloc_root_lock);
 +                                    &fs_info->delalloc_roots);
 +                      spin_unlock(&fs_info->delalloc_root_lock);
                }
        }
        spin_unlock(&root->delalloc_lock);
  static void btrfs_del_delalloc_inode(struct btrfs_root *root,
                                     struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 +
        spin_lock(&root->delalloc_lock);
        if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
                list_del_init(&BTRFS_I(inode)->delalloc_inodes);
                          &BTRFS_I(inode)->runtime_flags);
                root->nr_delalloc_inodes--;
                if (!root->nr_delalloc_inodes) {
 -                      spin_lock(&root->fs_info->delalloc_root_lock);
 +                      spin_lock(&fs_info->delalloc_root_lock);
                        BUG_ON(list_empty(&root->delalloc_root));
                        list_del_init(&root->delalloc_root);
 -                      spin_unlock(&root->fs_info->delalloc_root_lock);
 +                      spin_unlock(&fs_info->delalloc_root_lock);
                }
        }
        spin_unlock(&root->delalloc_lock);
@@@ -1749,8 -1739,6 +1749,8 @@@ static void btrfs_set_bit_hook(struct i
                               struct extent_state *state, unsigned *bits)
  {
  
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 +
        if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
                WARN_ON(1);
        /*
                }
  
                /* For sanity tests */
 -              if (btrfs_is_testing(root->fs_info))
 +              if (btrfs_is_testing(fs_info))
                        return;
  
 -              __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
 -                                   root->fs_info->delalloc_batch);
 +              __percpu_counter_add(&fs_info->delalloc_bytes, len,
 +                                   fs_info->delalloc_batch);
                spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->delalloc_bytes += len;
                if (*bits & EXTENT_DEFRAG)
@@@ -1795,7 -1783,6 +1795,7 @@@ static void btrfs_clear_bit_hook(struc
                                 struct extent_state *state,
                                 unsigned *bits)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 len = state->end + 1 - state->start;
        u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
                                    BTRFS_MAX_EXTENT_SIZE);
                 * error.
                 */
                if (*bits & EXTENT_DO_ACCOUNTING &&
 -                  root != root->fs_info->tree_root)
 +                  root != fs_info->tree_root)
                        btrfs_delalloc_release_metadata(inode, len);
  
                /* For sanity tests. */
 -              if (btrfs_is_testing(root->fs_info))
 +              if (btrfs_is_testing(fs_info))
                        return;
  
                if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
                        btrfs_free_reserved_data_space_noquota(inode,
                                        state->start, len);
  
 -              __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
 -                                   root->fs_info->delalloc_batch);
 +              __percpu_counter_add(&fs_info->delalloc_bytes, -len,
 +                                   fs_info->delalloc_batch);
                spin_lock(&BTRFS_I(inode)->lock);
                BTRFS_I(inode)->delalloc_bytes -= len;
                if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
@@@ -1866,8 -1853,7 +1866,8 @@@ int btrfs_merge_bio_hook(struct page *p
                         size_t size, struct bio *bio,
                         unsigned long bio_flags)
  {
 -      struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
 +      struct inode *inode = page->mapping->host;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
  
        length = bio->bi_iter.bi_size;
        map_length = length;
 -      ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
 -                            &map_length, NULL, 0);
 +      ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
 +                            NULL, 0);
        if (ret < 0)
                return ret;
        if (map_length < length + size)
@@@ -1899,9 -1885,10 +1899,9 @@@ static int __btrfs_submit_bio_start(str
                                    int mirror_num, unsigned long bio_flags,
                                    u64 bio_offset)
  {
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
  
 -      ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
 +      ret = btrfs_csum_one_bio(inode, bio, 0, 0);
        BUG_ON(ret); /* -ENOMEM */
        return 0;
  }
@@@ -1918,10 -1905,10 +1918,10 @@@ static int __btrfs_submit_bio_done(stru
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
  {
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret;
  
 -      ret = btrfs_map_bio(root, bio, mirror_num, 1);
 +      ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
        if (ret) {
                bio->bi_error = ret;
                bio_endio(bio);
@@@ -1937,7 -1924,6 +1937,7 @@@ static int btrfs_submit_bio_hook(struc
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
        int ret = 0;
                metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
  
        if (bio_op(bio) != REQ_OP_WRITE) {
 -              ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
 +              ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
                if (ret)
                        goto out;
  
                                                           bio_flags);
                        goto out;
                } else if (!skip_sum) {
 -                      ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
 +                      ret = btrfs_lookup_bio_sums(inode, bio, NULL);
                        if (ret)
                                goto out;
                }
                if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
                        goto mapit;
                /* we're doing a write, do the async checksumming */
 -              ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
 -                                 inode, bio, mirror_num,
 -                                 bio_flags, bio_offset,
 -                                 __btrfs_submit_bio_start,
 -                                 __btrfs_submit_bio_done);
 +              ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num,
 +                                        bio_flags, bio_offset,
 +                                        __btrfs_submit_bio_start,
 +                                        __btrfs_submit_bio_done);
                goto out;
        } else if (!skip_sum) {
 -              ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
 +              ret = btrfs_csum_one_bio(inode, bio, 0, 0);
                if (ret)
                        goto out;
        }
  
  mapit:
 -      ret = btrfs_map_bio(root, bio, mirror_num, 0);
 +      ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
  
  out:
        if (ret < 0) {
@@@ -2103,8 -2090,8 +2103,8 @@@ out_page
  static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
  {
        struct inode *inode = page->mapping->host;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_writepage_fixup *fixup;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
  
        /* this page is properly in the ordered list */
        if (TestClearPagePrivate2(page))
        btrfs_init_work(&fixup->work, btrfs_fixup_helper,
                        btrfs_writepage_fixup_worker, NULL, NULL);
        fixup->page = page;
 -      btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
 +      btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
        return -EBUSY;
  }
  
@@@ -2193,9 -2180,10 +2193,9 @@@ static int insert_reserved_file_extent(
        ins.objectid = disk_bytenr;
        ins.offset = disk_num_bytes;
        ins.type = BTRFS_EXTENT_ITEM_KEY;
 -      ret = btrfs_alloc_reserved_file_extent(trans, root,
 -                                      root->root_key.objectid,
 -                                      btrfs_ino(inode), file_pos,
 -                                      ram_bytes, &ins);
 +      ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
 +                                             btrfs_ino(inode), file_pos,
 +                                             ram_bytes, &ins);
        /*
         * Release the reserved range from inode dirty range map, as it is
         * already moved into delayed_ref_head
@@@ -2305,6 -2293,7 +2305,6 @@@ static noinline int record_one_backref(
                                       void *ctx)
  {
        struct btrfs_file_extent_item *extent;
 -      struct btrfs_fs_info *fs_info;
        struct old_sa_defrag_extent *old = ctx;
        struct new_sa_defrag_extent *new = old->new;
        struct btrfs_path *path = new->path;
        struct sa_defrag_extent_backref *backref;
        struct extent_buffer *leaf;
        struct inode *inode = new->inode;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int slot;
        int ret;
        u64 extent_offset;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
  
 -      fs_info = BTRFS_I(inode)->root->fs_info;
        root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(root)) {
                if (PTR_ERR(root) == -ENOENT)
@@@ -2424,7 -2413,7 +2424,7 @@@ out
  static noinline bool record_extent_backrefs(struct btrfs_path *path,
                                   struct new_sa_defrag_extent *new)
  {
 -      struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
        struct old_sa_defrag_extent *old, *tmp;
        int ret;
  
@@@ -2482,12 -2471,13 +2482,12 @@@ static noinline int relink_extent_backr
        struct btrfs_file_extent_item *item;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_trans_handle *trans;
 -      struct btrfs_fs_info *fs_info;
        struct btrfs_root *root;
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct old_sa_defrag_extent *old = backref->old;
        struct new_sa_defrag_extent *new = old->new;
 -      struct inode *src_inode = new->inode;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
        struct inode *inode;
        struct extent_state *cached = NULL;
        int ret = 0;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
  
 -      fs_info = BTRFS_I(src_inode)->root->fs_info;
        index = srcu_read_lock(&fs_info->subvol_srcu);
  
        root = btrfs_read_fs_root_no_name(fs_info, &key);
@@@ -2652,7 -2643,7 +2652,7 @@@ again
        inode_add_bytes(inode, len);
        btrfs_release_path(path);
  
 -      ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
 +      ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
                        new->disk_len, 0,
                        backref->root_id, backref->inum,
                        new->file_pos); /* start - extent_offset */
  out_free_path:
        btrfs_release_path(path);
        path->leave_spinning = 0;
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
  out_unlock:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
                             &cached, GFP_NOFS);
@@@ -2688,7 -2679,6 +2688,7 @@@ static void free_sa_defrag_extent(struc
  
  static void relink_file_extents(struct new_sa_defrag_extent *new)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
        struct btrfs_path *path;
        struct sa_defrag_extent_backref *backref;
        struct sa_defrag_extent_backref *prev = NULL;
  out:
        free_sa_defrag_extent(new);
  
 -      atomic_dec(&root->fs_info->defrag_running);
 -      wake_up(&root->fs_info->transaction_wait);
 +      atomic_dec(&fs_info->defrag_running);
 +      wake_up(&fs_info->transaction_wait);
  }
  
  static struct new_sa_defrag_extent *
  record_old_file_extents(struct inode *inode,
                        struct btrfs_ordered_extent *ordered)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_path *path;
        struct btrfs_key key;
@@@ -2842,7 -2831,7 +2842,7 @@@ next
        }
  
        btrfs_free_path(path);
 -      atomic_inc(&root->fs_info->defrag_running);
 +      atomic_inc(&fs_info->defrag_running);
  
        return new;
  
@@@ -2853,12 -2842,12 +2853,12 @@@ out_kfree
        return NULL;
  }
  
 -static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
 +static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
                                         u64 start, u64 len)
  {
        struct btrfs_block_group_cache *cache;
  
 -      cache = btrfs_lookup_block_group(root->fs_info, start);
 +      cache = btrfs_lookup_block_group(fs_info, start);
        ASSERT(cache);
  
        spin_lock(&cache->lock);
  static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
  {
        struct inode *inode = ordered_extent->inode;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
                        trans = NULL;
                        goto out;
                }
 -              trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 +              trans->block_rsv = &fs_info->delalloc_block_rsv;
                ret = btrfs_update_inode_fallback(trans, root, inode);
                if (ret) /* -ENOMEM or corruption */
                        btrfs_abort_transaction(trans, ret);
                goto out_unlock;
        }
  
 -      trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 +      trans->block_rsv = &fs_info->delalloc_block_rsv;
  
        if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
                compress_type = ordered_extent->compress_type;
                                                ordered_extent->file_offset +
                                                logical_len);
        } else {
 -              BUG_ON(root == root->fs_info->tree_root);
 +              BUG_ON(root == fs_info->tree_root);
                ret = insert_reserved_file_extent(trans, inode,
                                                ordered_extent->file_offset,
                                                ordered_extent->start,
                                                compress_type, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
                if (!ret)
 -                      btrfs_release_delalloc_bytes(root,
 +                      btrfs_release_delalloc_bytes(fs_info,
                                                     ordered_extent->start,
                                                     ordered_extent->disk_len);
        }
@@@ -3008,10 -2996,10 +3008,10 @@@ out_unlock
                             ordered_extent->file_offset +
                             ordered_extent->len - 1, &cached_state, GFP_NOFS);
  out:
 -      if (root != root->fs_info->tree_root)
 +      if (root != fs_info->tree_root)
                btrfs_delalloc_release_metadata(inode, ordered_extent->len);
        if (trans)
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
  
        if (ret || truncated) {
                u64 start, end;
                if ((ret || !logical_len) &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
 -                      btrfs_free_reserved_extent(root, ordered_extent->start,
 +                      btrfs_free_reserved_extent(fs_info,
 +                                                 ordered_extent->start,
                                                   ordered_extent->disk_len, 1);
        }
  
        if (new) {
                if (ret) {
                        free_sa_defrag_extent(new);
 -                      atomic_dec(&root->fs_info->defrag_running);
 +                      atomic_dec(&fs_info->defrag_running);
                } else {
                        relink_file_extents(new);
                }
@@@ -3076,7 -3063,7 +3076,7 @@@ static int btrfs_writepage_end_io_hook(
                                struct extent_state *state, int uptodate)
  {
        struct inode *inode = page->mapping->host;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_extent *ordered_extent = NULL;
        struct btrfs_workqueue *wq;
        btrfs_work_func_t func;
                return 0;
  
        if (btrfs_is_free_space_inode(inode)) {
 -              wq = root->fs_info->endio_freespace_worker;
 +              wq = fs_info->endio_freespace_worker;
                func = btrfs_freespace_write_helper;
        } else {
 -              wq = root->fs_info->endio_write_workers;
 +              wq = fs_info->endio_write_workers;
                func = btrfs_endio_write_helper;
        }
  
@@@ -3116,7 -3103,7 +3116,7 @@@ static int __readpage_endio_check(struc
  
        kaddr = kmap_atomic(page);
        csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
 -      btrfs_csum_final(csum, (char *)&csum);
 +      btrfs_csum_final(csum, (u8 *)&csum);
        if (csum != csum_expected)
                goto zeroit;
  
@@@ -3169,7 -3156,7 +3169,7 @@@ static int btrfs_readpage_end_io_hook(s
  
  void btrfs_add_delayed_iput(struct inode *inode)
  {
 -      struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_inode *binode = BTRFS_I(inode);
  
        if (atomic_add_unless(&inode->i_count, -1, 1))
        spin_unlock(&fs_info->delayed_iput_lock);
  }
  
 -void btrfs_run_delayed_iputs(struct btrfs_root *root)
 +void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
  {
 -      struct btrfs_fs_info *fs_info = root->fs_info;
  
        spin_lock(&fs_info->delayed_iput_lock);
        while (!list_empty(&fs_info->delayed_iputs)) {
  void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_rsv *block_rsv;
        int ret;
  
  
        if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
            btrfs_root_refs(&root->root_item) > 0) {
 -              ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
 +              ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
                                            root->root_key.objectid);
                if (ret)
                        btrfs_abort_transaction(trans, ret);
  
        if (block_rsv) {
                WARN_ON(block_rsv->size > 0);
 -              btrfs_free_block_rsv(root, block_rsv);
 +              btrfs_free_block_rsv(fs_info, block_rsv);
        }
  }
  
   */
  int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = NULL;
        int reserve = 0;
        int ret;
  
        if (!root->orphan_block_rsv) {
 -              block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 +              block_rsv = btrfs_alloc_block_rsv(fs_info,
 +                                                BTRFS_BLOCK_RSV_TEMP);
                if (!block_rsv)
                        return -ENOMEM;
        }
        if (!root->orphan_block_rsv) {
                root->orphan_block_rsv = block_rsv;
        } else if (block_rsv) {
 -              btrfs_free_block_rsv(root, block_rsv);
 +              btrfs_free_block_rsv(fs_info, block_rsv);
                block_rsv = NULL;
        }
  
  
        /* insert an orphan item to track subvolume contains orphan files */
        if (insert >= 2) {
 -              ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
 +              ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
                                               root->root_key.objectid);
                if (ret && ret != -EEXIST) {
                        btrfs_abort_transaction(trans, ret);
@@@ -3397,7 -3382,6 +3397,7 @@@ static int btrfs_orphan_del(struct btrf
   */
  int btrfs_orphan_cleanup(struct btrfs_root *root)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key, found_key;
                 */
  
                if (found_key.offset == last_objectid) {
 -                      btrfs_err(root->fs_info,
 -                              "Error removing orphan entry, stopping orphan cleanup");
 +                      btrfs_err(fs_info,
 +                                "Error removing orphan entry, stopping orphan cleanup");
                        ret = -EINVAL;
                        goto out;
                }
                found_key.objectid = found_key.offset;
                found_key.type = BTRFS_INODE_ITEM_KEY;
                found_key.offset = 0;
 -              inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
 +              inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
                ret = PTR_ERR_OR_ZERO(inode);
                if (ret && ret != -ENOENT)
                        goto out;
  
 -              if (ret == -ENOENT && root == root->fs_info->tree_root) {
 +              if (ret == -ENOENT && root == fs_info->tree_root) {
                        struct btrfs_root *dead_root;
                        struct btrfs_fs_info *fs_info = root->fs_info;
                        int is_dead_root = 0;
                                ret = PTR_ERR(trans);
                                goto out;
                        }
 -                      btrfs_debug(root->fs_info, "auto deleting %Lu",
 -                              found_key.objectid);
 +                      btrfs_debug(fs_info, "auto deleting %Lu",
 +                                  found_key.objectid);
                        ret = btrfs_del_orphan_item(trans, root,
                                                    found_key.objectid);
 -                      btrfs_end_transaction(trans, root);
 +                      btrfs_end_transaction(trans);
                        if (ret)
                                goto out;
                        continue;
                                goto out;
                        }
                        ret = btrfs_orphan_add(trans, inode);
 -                      btrfs_end_transaction(trans, root);
 +                      btrfs_end_transaction(trans);
                        if (ret) {
                                iput(inode);
                                goto out;
        root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
  
        if (root->orphan_block_rsv)
 -              btrfs_block_rsv_release(root, root->orphan_block_rsv,
 +              btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
                                        (u64)-1);
  
        if (root->orphan_block_rsv ||
            test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
                trans = btrfs_join_transaction(root);
                if (!IS_ERR(trans))
 -                      btrfs_end_transaction(trans, root);
 +                      btrfs_end_transaction(trans);
        }
  
        if (nr_unlink)
 -              btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
 +              btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
        if (nr_truncate)
 -              btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
 +              btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
  
  out:
        if (ret)
 -              btrfs_err(root->fs_info,
 -                      "could not do orphan cleanup %d", ret);
 +              btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
        btrfs_free_path(path);
        return ret;
  }
@@@ -3669,7 -3654,6 +3669,7 @@@ static noinline int acls_after_inode_it
   */
  static int btrfs_read_locked_inode(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
@@@ -3750,7 -3734,7 +3750,7 @@@ cache_index
         * This is required for both inode re-read from disk and delayed inode
         * in delayed_nodes_tree.
         */
 -      if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
 +      if (BTRFS_I(inode)->last_trans == fs_info->generation)
                set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                        &BTRFS_I(inode)->runtime_flags);
  
@@@ -3816,7 -3800,7 +3816,7 @@@ cache_acl
                path->slots[0] = first_xattr_slot;
                ret = btrfs_load_inode_props(inode, path);
                if (ret)
 -                      btrfs_err(root->fs_info,
 +                      btrfs_err(fs_info,
                                  "error loading props for ino %llu (root %llu): %d",
                                  btrfs_ino(inode),
                                  root->root_key.objectid, ret);
                break;
        case S_IFDIR:
                inode->i_fop = &btrfs_dir_file_operations;
 -              if (root == root->fs_info->tree_root)
 +              if (root == fs_info->tree_root)
                        inode->i_op = &btrfs_dir_ro_inode_operations;
                else
                        inode->i_op = &btrfs_dir_inode_operations;
@@@ -3953,7 -3937,6 +3953,7 @@@ failed
  noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
  
        /*
         */
        if (!btrfs_is_free_space_inode(inode)
            && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
 -          && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
 +          && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
                btrfs_update_root_times(trans, root);
  
                ret = btrfs_delayed_update_inode(trans, root, inode);
@@@ -3999,7 -3982,6 +3999,7 @@@ static int __btrfs_unlink_inode(struct 
                                struct inode *dir, struct inode *inode,
                                const char *name, int name_len)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        int ret = 0;
        struct extent_buffer *leaf;
        ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
                                  dir_ino, &index);
        if (ret) {
 -              btrfs_info(root->fs_info,
 +              btrfs_info(fs_info,
                        "failed to delete reference to %.*s, inode %llu parent %llu",
                        name_len, name, ino, dir_ino);
                btrfs_abort_transaction(trans, ret);
                goto err;
        }
  skip_backref:
 -      ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
 +      ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto err;
@@@ -4156,8 -4138,8 +4156,8 @@@ static int btrfs_unlink(struct inode *d
        }
  
  out:
 -      btrfs_end_transaction(trans, root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_end_transaction(trans);
 +      btrfs_btree_balance_dirty(root->fs_info);
        return ret;
  }
  
@@@ -4166,7 -4148,6 +4166,7 @@@ int btrfs_unlink_subvol(struct btrfs_tr
                        struct inode *dir, u64 objectid,
                        const char *name, int name_len)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
        }
        btrfs_release_path(path);
  
 -      ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
 -                               objectid, root->root_key.objectid,
 -                               dir_ino, &index, name, name_len);
 +      ret = btrfs_del_root_ref(trans, fs_info, objectid,
 +                               root->root_key.objectid, dir_ino,
 +                               &index, name, name_len);
        if (ret < 0) {
                if (ret != -ENOENT) {
                        btrfs_abort_transaction(trans, ret);
        }
        btrfs_release_path(path);
  
 -      ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
 +      ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                goto out;
@@@ -4293,8 -4274,8 +4293,8 @@@ static int btrfs_rmdir(struct inode *di
                        BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
        }
  out:
 -      btrfs_end_transaction(trans, root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_end_transaction(trans);
 +      btrfs_btree_balance_dirty(root->fs_info);
  
        return err;
  }
@@@ -4303,19 -4284,18 +4303,19 @@@ static int truncate_space_check(struct 
                                struct btrfs_root *root,
                                u64 bytes_deleted)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
  
        /*
         * This is only used to apply pressure to the enospc system, we don't
         * intend to use this reservation at all.
         */
 -      bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
 -      bytes_deleted *= root->nodesize;
 -      ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
 +      bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
 +      bytes_deleted *= fs_info->nodesize;
 +      ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
                                  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
        if (!ret) {
 -              trace_btrfs_space_reservation(root->fs_info, "transaction",
 +              trace_btrfs_space_reservation(fs_info, "transaction",
                                              trans->transid,
                                              bytes_deleted, 1);
                trans->bytes_reserved += bytes_deleted;
@@@ -4358,7 -4338,7 +4358,7 @@@ static int truncate_inline_extent(struc
  
        btrfs_set_file_extent_ram_bytes(leaf, fi, size);
        size = btrfs_file_extent_calc_inline_size(size);
 -      btrfs_truncate_item(root, path, size, 1);
 +      btrfs_truncate_item(root->fs_info, path, size, 1);
  
        if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                inode_sub_bytes(inode, item_end + 1 - new_size);
@@@ -4382,7 -4362,6 +4382,7 @@@ int btrfs_truncate_inode_items(struct b
                               struct inode *inode,
                               u64 new_size, u32 min_type)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
         * extent just the way it is.
         */
        if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 -          root == root->fs_info->tree_root)
 +          root == fs_info->tree_root)
                btrfs_drop_extent_cache(inode, ALIGN(new_size,
 -                                      root->sectorsize), (u64)-1, 0);
 +                                      fs_info->sectorsize),
 +                                      (u64)-1, 0);
  
        /*
         * This function is also used to drop the items in the log tree before
@@@ -4453,7 -4431,7 +4453,7 @@@ search_again
         * bytes_deleted is > 0, it will be huge by the time we get here
         */
        if (be_nice && bytes_deleted > SZ_32M) {
 -              if (btrfs_should_end_transaction(trans, root)) {
 +              if (btrfs_should_end_transaction(trans)) {
                        err = -EAGAIN;
                        goto error;
                }
                                        btrfs_file_extent_num_bytes(leaf, fi);
                                extent_num_bytes = ALIGN(new_size -
                                                found_key.offset,
 -                                              root->sectorsize);
 +                                              fs_info->sectorsize);
                                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                         extent_num_bytes);
                                num_dec = (orig_num_bytes -
@@@ -4617,16 -4595,16 +4617,16 @@@ delete
  
                if (found_extent &&
                    (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 -                   root == root->fs_info->tree_root)) {
 +                   root == fs_info->tree_root)) {
                        btrfs_set_path_blocking(path);
                        bytes_deleted += extent_num_bytes;
 -                      ret = btrfs_free_extent(trans, root, extent_start,
 +                      ret = btrfs_free_extent(trans, fs_info, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
                                                ino, extent_offset);
                        BUG_ON(ret);
 -                      if (btrfs_should_throttle_delayed_refs(trans, root))
 -                              btrfs_async_run_delayed_refs(root,
 +                      if (btrfs_should_throttle_delayed_refs(trans, fs_info))
 +                              btrfs_async_run_delayed_refs(fs_info,
                                        trans->delayed_ref_updates * 2,
                                        trans->transid, 0);
                        if (be_nice) {
                                        should_end = 1;
                                }
                                if (btrfs_should_throttle_delayed_refs(trans,
 -                                                                     root)) {
 +                                                                     fs_info))
                                        should_throttle = 1;
 -                              }
                        }
                }
  
                                unsigned long updates = trans->delayed_ref_updates;
                                if (updates) {
                                        trans->delayed_ref_updates = 0;
 -                                      ret = btrfs_run_delayed_refs(trans, root, updates * 2);
 +                                      ret = btrfs_run_delayed_refs(trans,
 +                                                                 fs_info,
 +                                                                 updates * 2);
                                        if (ret && !err)
                                                err = ret;
                                }
@@@ -4698,8 -4675,7 +4698,8 @@@ error
                unsigned long updates = trans->delayed_ref_updates;
                if (updates) {
                        trans->delayed_ref_updates = 0;
 -                      ret = btrfs_run_delayed_refs(trans, root, updates * 2);
 +                      ret = btrfs_run_delayed_refs(trans, fs_info,
 +                                                   updates * 2);
                        if (ret && !err)
                                err = ret;
                }
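
The hunks above all apply one conversion: code that only used root to reach root->fs_info now caches a struct btrfs_fs_info pointer up front and passes that (or nothing at all) to the helpers. A minimal sketch of the new call shapes, built only from calls that appear in this diff; the wrapper name example_throttle is invented for illustration:

        static int example_throttle(struct btrfs_trans_handle *trans,
                                    struct inode *inode)
        {
                struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

                /* transaction helpers now take fs_info, or just the handle */
                if (btrfs_should_end_transaction(trans))
                        return -EAGAIN;
                if (btrfs_should_throttle_delayed_refs(trans, fs_info))
                        btrfs_async_run_delayed_refs(fs_info,
                                        trans->delayed_ref_updates * 2,
                                        trans->transid, 0);
                return 0;
        }
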
  int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                        int front)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        char *kaddr;
 -      u32 blocksize = root->sectorsize;
 +      u32 blocksize = fs_info->sectorsize;
        pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (blocksize - 1);
        struct page *page;
@@@ -4831,7 -4807,6 +4831,7 @@@ out
  static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
                             u64 offset, u64 len)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_trans_handle *trans;
        int ret;
  
         * Still need to make sure the inode looks like it's been updated so
         * that any holes get logged if we fsync.
         */
 -      if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
 -              BTRFS_I(inode)->last_trans = root->fs_info->generation;
 +      if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
 +              BTRFS_I(inode)->last_trans = fs_info->generation;
                BTRFS_I(inode)->last_sub_trans = root->log_transid;
                BTRFS_I(inode)->last_log_commit = root->last_log_commit;
                return 0;
        ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
        if (ret) {
                btrfs_abort_transaction(trans, ret);
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
                return ret;
        }
  
                btrfs_abort_transaction(trans, ret);
        else
                btrfs_update_inode(trans, root, inode);
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        return ret;
  }
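
With the diff markers stripped, maybe_insert_hole() above reduces to the shape below: the NO_HOLES incompat bit and the generation are read from fs_info, the per-root log state stays on root, and btrfs_end_transaction() takes only the handle. This is a condensation of the hunk, with the extent dropping and hole insertion elided:

        if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
                /* no hole extent needed; still make fsync notice the update */
                BTRFS_I(inode)->last_trans = fs_info->generation;
                BTRFS_I(inode)->last_sub_trans = root->log_transid;
                BTRFS_I(inode)->last_log_commit = root->last_log_commit;
                return 0;
        }
        /* drop extents and insert the hole, then: */
        btrfs_end_transaction(trans);           /* root argument is gone */
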
  
   */
  int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_map *em = NULL;
        struct extent_state *cached_state = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 -      u64 hole_start = ALIGN(oldsize, root->sectorsize);
 -      u64 block_end = ALIGN(size, root->sectorsize);
 +      u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
 +      u64 block_end = ALIGN(size, fs_info->sectorsize);
        u64 last_byte;
        u64 cur_offset;
        u64 hole_size;
                        break;
                }
                last_byte = min(extent_map_end(em), block_end);
 -              last_byte = ALIGN(last_byte , root->sectorsize);
 +              last_byte = ALIGN(last_byte, fs_info->sectorsize);
                if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
                        struct extent_map *hole_em;
                        hole_size = last_byte - cur_offset;
                        hole_em->block_len = 0;
                        hole_em->orig_block_len = 0;
                        hole_em->ram_bytes = hole_size;
 -                      hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
 +                      hole_em->bdev = fs_info->fs_devices->latest_bdev;
                        hole_em->compress_type = BTRFS_COMPRESS_NONE;
 -                      hole_em->generation = root->fs_info->generation;
 +                      hole_em->generation = fs_info->generation;
  
                        while (1) {
                                write_lock(&em_tree->lock);
@@@ -5032,7 -5006,7 +5032,7 @@@ static int btrfs_setsize(struct inode *
                pagecache_isize_extended(inode, oldsize, newsize);
                ret = btrfs_update_inode(trans, root, inode);
                btrfs_end_write_no_snapshoting(root);
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
        } else {
  
                /*
                 * will be consistent.
                 */
                ret = btrfs_orphan_add(trans, inode);
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
                if (ret)
                        return ret;
  
                        err = btrfs_orphan_del(trans, inode);
                        if (err)
                                btrfs_abort_transaction(trans, err);
 -                      btrfs_end_transaction(trans, root);
 +                      btrfs_end_transaction(trans);
                }
        }
  
@@@ -5227,7 -5201,6 +5227,7 @@@ static void evict_inode_truncate_pages(
  
  void btrfs_evict_inode(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *rsv, *global_rsv;
                return;
        }
  
 -      min_size = btrfs_calc_trunc_metadata_size(root, 1);
 +      min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
  
        evict_inode_truncate_pages(inode);
  
  
        btrfs_free_io_failure_record(inode, 0, (u64)-1);
  
 -      if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
 +      if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
                BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                                 &BTRFS_I(inode)->runtime_flags));
                goto no_delete;
                goto no_delete;
        }
  
 -      rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 +      rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv) {
                btrfs_orphan_del(NULL, inode);
                goto no_delete;
        }
        rsv->size = min_size;
        rsv->failfast = 1;
 -      global_rsv = &root->fs_info->global_block_rsv;
 +      global_rsv = &fs_info->global_block_rsv;
  
        btrfs_i_size_write(inode, 0);
  
                 * steal_from_global == 3: abandon all hope!
                 */
                if (steal_from_global > 2) {
 -                      btrfs_warn(root->fs_info,
 -                              "Could not get space for a delete, will truncate on mount %d",
 -                              ret);
 +                      btrfs_warn(fs_info,
 +                                 "Could not get space for a delete, will truncate on mount %d",
 +                                 ret);
                        btrfs_orphan_del(NULL, inode);
 -                      btrfs_free_block_rsv(root, rsv);
 +                      btrfs_free_block_rsv(fs_info, rsv);
                        goto no_delete;
                }
  
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        btrfs_orphan_del(NULL, inode);
 -                      btrfs_free_block_rsv(root, rsv);
 +                      btrfs_free_block_rsv(fs_info, rsv);
                        goto no_delete;
                }
  
                 * again.
                 */
                if (steal_from_global) {
 -                      if (!btrfs_check_space_for_delayed_refs(trans, root))
 +                      if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
                                ret = btrfs_block_rsv_migrate(global_rsv, rsv,
                                                              min_size, 0);
                        else
                 * again.
                 */
                if (ret) {
 -                      ret = btrfs_commit_transaction(trans, root);
 +                      ret = btrfs_commit_transaction(trans);
                        if (ret) {
                                btrfs_orphan_del(NULL, inode);
 -                              btrfs_free_block_rsv(root, rsv);
 +                              btrfs_free_block_rsv(fs_info, rsv);
                                goto no_delete;
                        }
                        continue;
                if (ret != -ENOSPC && ret != -EAGAIN)
                        break;
  
 -              trans->block_rsv = &root->fs_info->trans_block_rsv;
 -              btrfs_end_transaction(trans, root);
 +              trans->block_rsv = &fs_info->trans_block_rsv;
 +              btrfs_end_transaction(trans);
                trans = NULL;
 -              btrfs_btree_balance_dirty(root);
 +              btrfs_btree_balance_dirty(fs_info);
        }
  
 -      btrfs_free_block_rsv(root, rsv);
 +      btrfs_free_block_rsv(fs_info, rsv);
  
        /*
         * Errors here aren't a big deal, it just means we leave orphan items
                btrfs_orphan_del(NULL, inode);
        }
  
 -      trans->block_rsv = &root->fs_info->trans_block_rsv;
 -      if (!(root == root->fs_info->tree_root ||
 +      trans->block_rsv = &fs_info->trans_block_rsv;
 +      if (!(root == fs_info->tree_root ||
              root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
                btrfs_return_ino(root, btrfs_ino(inode));
  
 -      btrfs_end_transaction(trans, root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_end_transaction(trans);
 +      btrfs_btree_balance_dirty(fs_info);
  no_delete:
        btrfs_remove_delayed_node(inode);
        clear_inode(inode);
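
btrfs_evict_inode() shows the reservation side of the same conversion: the temporary block reservation is allocated, sized and freed against fs_info, as is the btree dirty balancing. Condensed from the hunks above, with the truncation loop left out:

        rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                goto no_delete;
        rsv->size = btrfs_calc_trunc_metadata_size(fs_info, 1);
        rsv->failfast = 1;
        global_rsv = &fs_info->global_block_rsv;

        /* truncate loop elided */

        btrfs_free_block_rsv(fs_info, rsv);
        btrfs_btree_balance_dirty(fs_info);
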
@@@ -5443,7 -5416,7 +5443,7 @@@ out_err
   * needs to be changed to reflect the root directory of the tree root.  This
   * is kind of like crossing a mount point.
   */
 -static int fixup_tree_root_location(struct btrfs_root *root,
 +static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
                                    struct inode *dir,
                                    struct dentry *dentry,
                                    struct btrfs_key *location,
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = location->objectid;
  
 -      ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
 -                              0, 0);
 +      ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
        if (ret) {
                if (ret < 0)
                        err = ret;
  
        btrfs_release_path(path);
  
 -      new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
 +      new_root = btrfs_read_fs_root_no_name(fs_info, location);
        if (IS_ERR(new_root)) {
                err = PTR_ERR(new_root);
                goto out;
@@@ -5543,7 -5517,6 +5543,7 @@@ static void inode_tree_add(struct inod
  
  static void inode_tree_del(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int empty = 0;
  
        spin_unlock(&root->inode_lock);
  
        if (empty && btrfs_root_refs(&root->root_item) == 0) {
 -              synchronize_srcu(&root->fs_info->subvol_srcu);
 +              synchronize_srcu(&fs_info->subvol_srcu);
                spin_lock(&root->inode_lock);
                empty = RB_EMPTY_ROOT(&root->inode_tree);
                spin_unlock(&root->inode_lock);
  
  void btrfs_invalidate_inodes(struct btrfs_root *root)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct rb_node *node;
        struct rb_node *prev;
        struct btrfs_inode *entry;
        struct inode *inode;
        u64 objectid = 0;
  
 -      if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 +      if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                WARN_ON(btrfs_root_refs(&root->root_item) != 0);
  
        spin_lock(&root->inode_lock);
@@@ -5722,7 -5694,6 +5722,7 @@@ static struct inode *new_simple_dir(str
  
  struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct inode *inode;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_root *sub_root = root;
  
        BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
  
 -      index = srcu_read_lock(&root->fs_info->subvol_srcu);
 -      ret = fixup_tree_root_location(root, dir, dentry,
 +      index = srcu_read_lock(&fs_info->subvol_srcu);
 +      ret = fixup_tree_root_location(fs_info, dir, dentry,
                                       &location, &sub_root);
        if (ret < 0) {
                if (ret != -ENOENT)
        } else {
                inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
        }
 -      srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 +      srcu_read_unlock(&fs_info->subvol_srcu, index);
  
        if (!IS_ERR(inode) && root != sub_root) {
 -              down_read(&root->fs_info->cleanup_work_sem);
 +              down_read(&fs_info->cleanup_work_sem);
                if (!(inode->i_sb->s_flags & MS_RDONLY))
                        ret = btrfs_orphan_cleanup(sub_root);
 -              up_read(&root->fs_info->cleanup_work_sem);
 +              up_read(&fs_info->cleanup_work_sem);
                if (ret) {
                        iput(inode);
                        inode = ERR_PTR(ret);
@@@ -5821,7 -5792,6 +5821,7 @@@ unsigned char btrfs_filetype_table[] = 
  static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
  {
        struct inode *inode = file_inode(file);
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_item *item;
        struct btrfs_dir_item *di;
        int slot;
        unsigned char d_type;
        int over = 0;
 -      u32 di_cur;
 -      u32 di_total;
 -      u32 di_len;
 -      int key_type = BTRFS_DIR_INDEX_KEY;
        char tmp_name[32];
        char *name_ptr;
        int name_len;
 -      int is_curr = 0;        /* ctx->pos points to the current index? */
 -      bool emitted;
        bool put = false;
 -
 -      /* FIXME, use a real flag for deciding about the key type */
 -      if (root->fs_info->tree_root == root)
 -              key_type = BTRFS_DIR_ITEM_KEY;
 +      struct btrfs_key location;
  
        if (!dir_emit_dots(file, ctx))
                return 0;
  
        path->reada = READA_FORWARD;
  
 -      if (key_type == BTRFS_DIR_INDEX_KEY) {
 -              INIT_LIST_HEAD(&ins_list);
 -              INIT_LIST_HEAD(&del_list);
 -              put = btrfs_readdir_get_delayed_items(inode, &ins_list,
 -                                                    &del_list);
 -      }
 +      INIT_LIST_HEAD(&ins_list);
 +      INIT_LIST_HEAD(&del_list);
 +      put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
  
 -      key.type = key_type;
 +      key.type = BTRFS_DIR_INDEX_KEY;
        key.offset = ctx->pos;
        key.objectid = btrfs_ino(inode);
  
        if (ret < 0)
                goto err;
  
 -      emitted = false;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
  
                if (found_key.objectid != key.objectid)
                        break;
 -              if (found_key.type != key_type)
 +              if (found_key.type != BTRFS_DIR_INDEX_KEY)
                        break;
                if (found_key.offset < ctx->pos)
                        goto next;
 -              if (key_type == BTRFS_DIR_INDEX_KEY &&
 -                  btrfs_should_delete_dir_index(&del_list,
 -                                                found_key.offset))
 +              if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
                        goto next;
  
                ctx->pos = found_key.offset;
 -              is_curr = 1;
  
                di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
 -              di_cur = 0;
 -              di_total = btrfs_item_size(leaf, item);
 -
 -              while (di_cur < di_total) {
 -                      struct btrfs_key location;
 -
 -                      if (verify_dir_item(root, leaf, di))
 -                              break;
 +              if (verify_dir_item(fs_info, leaf, di))
 +                      goto next;
  
 -                      name_len = btrfs_dir_name_len(leaf, di);
 -                      if (name_len <= sizeof(tmp_name)) {
 -                              name_ptr = tmp_name;
 -                      } else {
 -                              name_ptr = kmalloc(name_len, GFP_KERNEL);
 -                              if (!name_ptr) {
 -                                      ret = -ENOMEM;
 -                                      goto err;
 -                              }
 +              name_len = btrfs_dir_name_len(leaf, di);
 +              if (name_len <= sizeof(tmp_name)) {
 +                      name_ptr = tmp_name;
 +              } else {
 +                      name_ptr = kmalloc(name_len, GFP_KERNEL);
 +                      if (!name_ptr) {
 +                              ret = -ENOMEM;
 +                              goto err;
                        }
 -                      read_extent_buffer(leaf, name_ptr,
 -                                         (unsigned long)(di + 1), name_len);
 -
 -                      d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
 -                      btrfs_dir_item_key_to_cpu(leaf, di, &location);
 +              }
 +              read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
 +                                 name_len);
  
 +              d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
 +              btrfs_dir_item_key_to_cpu(leaf, di, &location);
  
 -                      /* is this a reference to our own snapshot? If so
 -                       * skip it.
 -                       *
 -                       * In contrast to old kernels, we insert the snapshot's
 -                       * dir item and dir index after it has been created, so
 -                       * we won't find a reference to our own snapshot. We
 -                       * still keep the following code for backward
 -                       * compatibility.
 -                       */
 -                      if (location.type == BTRFS_ROOT_ITEM_KEY &&
 -                          location.objectid == root->root_key.objectid) {
 -                              over = 0;
 -                              goto skip;
 -                      }
 -                      over = !dir_emit(ctx, name_ptr, name_len,
 -                                     location.objectid, d_type);
 +              over = !dir_emit(ctx, name_ptr, name_len, location.objectid,
 +                               d_type);
  
 -skip:
 -                      if (name_ptr != tmp_name)
 -                              kfree(name_ptr);
 +              if (name_ptr != tmp_name)
 +                      kfree(name_ptr);
  
 -                      if (over)
 -                              goto nopos;
 -                      emitted = true;
 -                      di_len = btrfs_dir_name_len(leaf, di) +
 -                               btrfs_dir_data_len(leaf, di) + sizeof(*di);
 -                      di_cur += di_len;
 -                      di = (struct btrfs_dir_item *)((char *)di + di_len);
 -              }
 +              if (over)
 +                      goto nopos;
 +              ctx->pos++;
  next:
                path->slots[0]++;
        }
  
 -      if (key_type == BTRFS_DIR_INDEX_KEY) {
 -              if (is_curr)
 -                      ctx->pos++;
 -              ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
 -              if (ret)
 -                      goto nopos;
 -      }
 -
 -      /*
 -       * If we haven't emitted any dir entry, we must not touch ctx->pos as
 -       * it was was set to the termination value in previous call. We assume
 -       * that "." and ".." were emitted if we reach this point and set the
 -       * termination value as well for an empty directory.
 -       */
 -      if (ctx->pos > 2 && !emitted)
 +      ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 +      if (ret)
                goto nopos;
  
 -      /* Reached end of directory/root. Bump pos past the last item. */
 -      ctx->pos++;
 -
        /*
         * Stop new entries from being returned after we return the last
         * entry.
         * last entry requires it because doing so has broken 32bit apps
         * in the past.
         */
 -      if (key_type == BTRFS_DIR_INDEX_KEY) {
 -              if (ctx->pos >= INT_MAX)
 -                      ctx->pos = LLONG_MAX;
 -              else
 -                      ctx->pos = INT_MAX;
 -      }
 +      if (ctx->pos >= INT_MAX)
 +              ctx->pos = LLONG_MAX;
 +      else
 +              ctx->pos = INT_MAX;
  nopos:
        ret = 0;
  err:
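
The btrfs_real_readdir() rewrite above drops the BTRFS_DIR_ITEM_KEY special case for the tree root and the nested walk over packed dir items, since every entry is now reached through exactly one DIR_INDEX item. Per leaf item, the emit path condenses to roughly the following (name buffer handling elided, identifiers as in the diff):

        di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
        if (verify_dir_item(fs_info, leaf, di))
                goto next;
        name_len = btrfs_dir_name_len(leaf, di);
        read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), name_len);
        d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
        btrfs_dir_item_key_to_cpu(leaf, di, &location);
        if (!dir_emit(ctx, name_ptr, name_len, location.objectid, d_type))
                goto nopos;
        ctx->pos++;                     /* one entry per DIR_INDEX item */
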
@@@ -5975,7 -6006,7 +5975,7 @@@ int btrfs_write_inode(struct inode *ino
                        trans = btrfs_join_transaction(root);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);
 -              ret = btrfs_commit_transaction(trans, root);
 +              ret = btrfs_commit_transaction(trans);
        }
        return ret;
  }
   */
  static int btrfs_dirty_inode(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
                /* whoops, lets try again with the full transaction */
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);
  
                ret = btrfs_update_inode(trans, root, inode);
        }
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (BTRFS_I(inode)->delayed_node)
 -              btrfs_balance_delayed_items(root);
 +              btrfs_balance_delayed_items(fs_info);
  
        return ret;
  }
@@@ -6138,7 -6168,6 +6138,7 @@@ static struct inode *btrfs_new_inode(st
                                     u64 ref_objectid, u64 objectid,
                                     umode_t mode, u64 *index)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        struct inode *inode;
        struct btrfs_inode_item *inode_item;
        struct btrfs_key *location;
        if (!path)
                return ERR_PTR(-ENOMEM);
  
 -      inode = new_inode(root->fs_info->sb);
 +      inode = new_inode(fs_info->sb);
        if (!inode) {
                btrfs_free_path(path);
                return ERR_PTR(-ENOMEM);
  
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_inode_item);
 -      memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
 +      memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
                             sizeof(*inode_item));
        fill_inode_item(trans, path->nodes[0], inode_item, inode);
  
        btrfs_inherit_iflags(inode, dir);
  
        if (S_ISREG(mode)) {
 -              if (btrfs_test_opt(root->fs_info, NODATASUM))
 +              if (btrfs_test_opt(fs_info, NODATASUM))
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
 -              if (btrfs_test_opt(root->fs_info, NODATACOW))
 +              if (btrfs_test_opt(fs_info, NODATACOW))
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
                                BTRFS_INODE_NODATASUM;
        }
  
        ret = btrfs_inode_inherit_props(trans, inode, dir);
        if (ret)
 -              btrfs_err(root->fs_info,
 +              btrfs_err(fs_info,
                          "error inheriting props for ino %llu (root %llu): %d",
                          btrfs_ino(inode), root->root_key.objectid, ret);
  
@@@ -6314,7 -6343,6 +6314,7 @@@ int btrfs_add_link(struct btrfs_trans_h
                   struct inode *parent_inode, struct inode *inode,
                   const char *name, int name_len, int add_backref, u64 index)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret = 0;
        struct btrfs_key key;
        struct btrfs_root *root = BTRFS_I(parent_inode)->root;
        }
  
        if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
 -              ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
 -                                       key.objectid, root->root_key.objectid,
 -                                       parent_ino, index, name, name_len);
 +              ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
 +                                       root->root_key.objectid, parent_ino,
 +                                       index, name, name_len);
        } else if (add_backref) {
                ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
                                             parent_ino, index);
@@@ -6366,9 -6394,9 +6366,9 @@@ fail_dir_item
        if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
                u64 local_index;
                int err;
 -              err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
 -                               key.objectid, root->root_key.objectid,
 -                               parent_ino, &local_index, name, name_len);
 +              err = btrfs_del_root_ref(trans, fs_info, key.objectid,
 +                                       root->root_key.objectid, parent_ino,
 +                                       &local_index, name, name_len);
  
        } else if (add_backref) {
                u64 local_index;
@@@ -6395,7 -6423,6 +6395,7 @@@ static int btrfs_add_nondir(struct btrf
  static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                        umode_t mode, dev_t rdev)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        }
  
  out_unlock:
 -      btrfs_end_transaction(trans, root);
 -      btrfs_balance_delayed_items(root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_end_transaction(trans);
 +      btrfs_balance_delayed_items(fs_info);
 +      btrfs_btree_balance_dirty(fs_info);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@@ -6467,7 -6494,6 +6467,7 @@@ out_unlock_inode
  static int btrfs_create(struct inode *dir, struct dentry *dentry,
                        umode_t mode, bool excl)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        d_instantiate(dentry, inode);
  
  out_unlock:
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (err && drop_inode_on_err) {
                inode_dec_link_count(inode);
                iput(inode);
        }
 -      btrfs_balance_delayed_items(root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_balance_delayed_items(fs_info);
 +      btrfs_btree_balance_dirty(fs_info);
        return err;
  
  out_unlock_inode:
@@@ -6545,7 -6571,6 +6545,7 @@@ static int btrfs_link(struct dentry *ol
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = d_inode(old_dentry);
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        u64 index;
        int err;
        int drop_inode = 0;
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
  
 -      btrfs_balance_delayed_items(root);
 +      btrfs_balance_delayed_items(fs_info);
  fail:
        if (trans)
 -              btrfs_end_transaction(trans, root);
 +              btrfs_end_transaction(trans);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_btree_balance_dirty(fs_info);
        return err;
  }
  
  static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct inode *inode = NULL;
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        drop_on_err = 0;
  
  out_fail:
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (drop_on_err) {
                inode_dec_link_count(inode);
                iput(inode);
        }
 -      btrfs_balance_delayed_items(root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_balance_delayed_items(fs_info);
 +      btrfs_btree_balance_dirty(fs_info);
        return err;
  
  out_fail_inode:
@@@ -6796,7 -6820,6 +6796,7 @@@ struct extent_map *btrfs_get_extent(str
                                    size_t pg_offset, u64 start, u64 len,
                                    int create)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret;
        int err = 0;
        u64 extent_start = 0;
@@@ -6818,7 -6841,7 +6818,7 @@@ again
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em)
 -              em->bdev = root->fs_info->fs_devices->latest_bdev;
 +              em->bdev = fs_info->fs_devices->latest_bdev;
        read_unlock(&em_tree->lock);
  
        if (em) {
                err = -ENOMEM;
                goto out;
        }
 -      em->bdev = root->fs_info->fs_devices->latest_bdev;
 +      em->bdev = fs_info->fs_devices->latest_bdev;
        em->start = EXTENT_MAP_HOLE;
        em->orig_start = EXTENT_MAP_HOLE;
        em->len = (u64)-1;
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
 -              extent_end = ALIGN(extent_start + size, root->sectorsize);
 +              extent_end = ALIGN(extent_start + size,
 +                                 fs_info->sectorsize);
        }
  next:
        if (start >= extent_end) {
                copy_size = min_t(u64, PAGE_SIZE - pg_offset,
                                  size - extent_offset);
                em->start = extent_start + extent_offset;
 -              em->len = ALIGN(copy_size, root->sectorsize);
 +              em->len = ALIGN(copy_size, fs_info->sectorsize);
                em->orig_block_len = em->len;
                em->orig_start = em->start;
                ptr = btrfs_file_extent_inline_start(item) + extent_offset;
@@@ -7002,7 -7024,7 +7002,7 @@@ not_found_em
  insert:
        btrfs_release_path(path);
        if (em->start > start || extent_map_end(em) <= start) {
 -              btrfs_err(root->fs_info,
 +              btrfs_err(fs_info,
                          "bad extent! em: [%llu %llu] passed [%llu %llu]",
                          em->start, em->len, start, len);
                err = -EIO;
                 * extent causing the -EEXIST.
                 */
                if (existing->start == em->start &&
 -                  extent_map_end(existing) == extent_map_end(em) &&
 +                  extent_map_end(existing) >= extent_map_end(em) &&
                    em->block_start == existing->block_start) {
                        /*
 -                       * these two extents are the same, it happens
 -                       * with inlines especially
 +                       * The existing extent map already encompasses the
 +                       * entire extent map we tried to add.
                         */
                        free_extent_map(em);
                        em = existing;
@@@ -7063,7 -7085,7 +7063,7 @@@ out
  
        btrfs_free_path(path);
        if (trans) {
 -              ret = btrfs_end_transaction(trans, root);
 +              ret = btrfs_end_transaction(trans);
                if (!err)
                        err = ret;
        }
@@@ -7242,7 -7264,6 +7242,7 @@@ static struct extent_map *btrfs_create_
  static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
                                                  u64 start, u64 len)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map *em;
        struct btrfs_key ins;
        int ret;
  
        alloc_hint = get_extent_allocation_hint(inode, start, len);
 -      ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
 -                                 alloc_hint, &ins, 1, 1);
 +      ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
 +                                 0, alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
  
        em = btrfs_create_dio_extent(inode, start, ins.offset, start,
                                     ins.objectid, ins.offset, ins.offset,
                                     ins.offset, 0);
 -      btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 +      btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        if (IS_ERR(em))
 -              btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 +              btrfs_free_reserved_extent(fs_info, ins.objectid,
 +                                         ins.offset, 1);
  
        return em;
  }
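
btrfs_new_extent_direct() gives the data-allocation view of the change: the allocation granularity and the reservation bookkeeping both come from fs_info. The resulting call sequence, with the markers stripped:

        ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 0,
                                   alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
        em = btrfs_create_dio_extent(inode, start, ins.offset, start,
                                     ins.objectid, ins.offset, ins.offset,
                                     ins.offset, 0);
        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        if (IS_ERR(em))
                btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
        return em;
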
@@@ -7274,7 -7294,6 +7274,7 @@@ noinline int can_nocow_extent(struct in
                              u64 *orig_start, u64 *orig_block_len,
                              u64 *ram_bytes)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        int ret;
                *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
        }
  
 -      if (btrfs_extent_readonly(root, disk_bytenr))
 +      if (btrfs_extent_readonly(fs_info, disk_bytenr))
                goto out;
  
        num_bytes = min(offset + *len, extent_end) - offset;
        if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
                u64 range_end;
  
 -              range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
 +              range_end = round_up(offset + num_bytes,
 +                                   root->fs_info->sectorsize) - 1;
                ret = test_range_bit(io_tree, offset, range_end,
                                     EXTENT_DELALLOC, 0, NULL);
                if (ret) {
  
        ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
                                    key.offset - backref_offset, disk_bytenr);
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (ret) {
                ret = 0;
                goto out;
         */
        disk_bytenr += backref_offset;
        disk_bytenr += offset - key.offset;
 -      if (csum_exist_in_range(root, disk_bytenr, num_bytes))
 -                              goto out;
 +      if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
 +              goto out;
        /*
         * all of the above have passed, it is safe to overwrite this extent
         * without cow
@@@ -7635,8 -7653,8 +7635,8 @@@ static void adjust_dio_outstanding_exte
  static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map *em;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_state *cached_state = NULL;
        struct btrfs_dio_data *dio_data = NULL;
        u64 start = iblock << inode->i_blkbits;
        if (create)
                unlock_bits |= EXTENT_DIRTY;
        else
 -              len = min_t(u64, len, root->sectorsize);
 +              len = min_t(u64, len, fs_info->sectorsize);
  
        lockstart = start;
        lockend = start + len - 1;
  
                if (can_nocow_extent(inode, start, &len, &orig_start,
                                     &orig_block_len, &ram_bytes) == 1 &&
 -                  btrfs_inc_nocow_writers(root->fs_info, block_start)) {
 +                  btrfs_inc_nocow_writers(fs_info, block_start)) {
                        struct extent_map *em2;
  
                        em2 = btrfs_create_dio_extent(inode, start, len,
                                                      orig_start, block_start,
                                                      len, orig_block_len,
                                                      ram_bytes, type);
 -                      btrfs_dec_nocow_writers(root->fs_info, block_start);
 +                      btrfs_dec_nocow_writers(fs_info, block_start);
                        if (type == BTRFS_ORDERED_PREALLOC) {
                                free_extent_map(em);
                                em = em2;
  static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
                                        int mirror_num)
  {
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret;
  
        BUG_ON(bio_op(bio) == REQ_OP_WRITE);
  
        bio_get(bio);
  
 -      ret = btrfs_bio_wq_end_io(root->fs_info, bio,
 -                                BTRFS_WQ_ENDIO_DIO_REPAIR);
 +      ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
        if (ret)
                goto err;
  
 -      ret = btrfs_map_bio(root, bio, mirror_num, 0);
 +      ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
  err:
        bio_put(bio);
        return ret;
@@@ -7898,7 -7917,7 +7898,7 @@@ static int dio_read_error(struct inode 
        struct io_failure_record *failrec;
        struct bio *bio;
        int isector;
 -      int read_mode;
 +      int read_mode = 0;
        int ret;
  
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
  
        if ((failed_bio->bi_vcnt > 1)
                || (failed_bio->bi_io_vec->bv_len
 -                      > BTRFS_I(inode)->root->sectorsize))
 -              read_mode = READ_SYNC | REQ_FAILFAST_DEV;
 -      else
 -              read_mode = READ_SYNC;
 +                      > btrfs_inode_sectorsize(inode)))
 +              read_mode |= REQ_FAILFAST_DEV;
  
        isector = start - btrfs_io_bio(failed_bio)->logical;
        isector >>= inode->i_sb->s_blocksize_bits;
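
Two small cleanups meet in dio_read_error(): read_mode now starts at 0 and only gains REQ_FAILFAST_DEV for bios spanning more than one sector (READ_SYNC is no longer set here), and the sector size is taken from the new btrfs_inode_sectorsize() helper rather than root->sectorsize. In short:

        int read_mode = 0;

        if (failed_bio->bi_vcnt > 1 ||
            failed_bio->bi_io_vec->bv_len > btrfs_inode_sectorsize(inode))
                read_mode |= REQ_FAILFAST_DEV;
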
@@@ -7961,7 -7982,7 +7961,7 @@@ static void btrfs_retry_endio_nocsum(st
  
        ASSERT(bio->bi_vcnt == 1);
        inode = bio->bi_io_vec->bv_page->mapping->host;
 -      ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
 +      ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
  
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
@@@ -7985,7 -8006,7 +7985,7 @@@ static int __btrfs_correct_data_nocsum(
        int ret;
  
        fs_info = BTRFS_I(inode)->root->fs_info;
 -      sectorsize = BTRFS_I(inode)->root->sectorsize;
 +      sectorsize = fs_info->sectorsize;
  
        start = io_bio->logical;
        done.inode = inode;
@@@ -8044,7 -8065,7 +8044,7 @@@ static void btrfs_retry_endio(struct bi
  
        ASSERT(bio->bi_vcnt == 1);
        inode = bio->bi_io_vec->bv_page->mapping->host;
 -      ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
 +      ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
  
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
@@@ -8079,7 -8100,7 +8079,7 @@@ static int __btrfs_subio_endio_read(str
        int ret;
  
        fs_info = BTRFS_I(inode)->root->fs_info;
 -      sectorsize = BTRFS_I(inode)->root->sectorsize;
 +      sectorsize = fs_info->sectorsize;
  
        err = 0;
        start = io_bio->logical;
@@@ -8176,7 -8197,7 +8176,7 @@@ static void btrfs_endio_direct_write_up
                                                    const u64 bytes,
                                                    const int uptodate)
  {
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_extent *ordered = NULL;
        u64 ordered_offset = offset;
        u64 ordered_bytes = bytes;
@@@ -8192,7 -8213,8 +8192,7 @@@ again
  
        btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
                        finish_ordered_fn, NULL, NULL);
 -      btrfs_queue_work(root->fs_info->endio_write_workers,
 -                       &ordered->work);
 +      btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
  out_test:
        /*
         * our bio might span multiple ordered extents.  If we haven't
@@@ -8227,7 -8249,8 +8227,7 @@@ static int __btrfs_submit_bio_start_dir
                                    unsigned long bio_flags, u64 offset)
  {
        int ret;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 -      ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
 +      ret = btrfs_csum_one_bio(inode, bio, offset, 1);
        BUG_ON(ret); /* -ENOMEM */
        return 0;
  }
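
The checksum and submission helpers in the direct-IO path lose their root parameter as well: btrfs_csum_one_bio() works from the inode, while the workqueue and mapping helpers take fs_info. The three call shapes from __btrfs_submit_dio_bio(), shown out of their surrounding branches:

        /* end-io workqueue hookup on the read path */
        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        /* synchronous checksumming on the non-async write path */
        ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
        /* final submission */
        ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
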
@@@ -8281,7 -8304,8 +8281,7 @@@ static struct bio *btrfs_dio_bio_alloc(
        return bio;
  }
  
 -static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
 -                                               struct inode *inode,
 +static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
                                                 struct btrfs_dio_private *dip,
                                                 struct bio *bio,
                                                 u64 file_offset)
         * contention.
         */
        if (dip->logical_offset == file_offset) {
 -              ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
 +              ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
                                                file_offset);
                if (ret)
                        return ret;
@@@ -8316,9 -8340,9 +8316,9 @@@ static inline int __btrfs_submit_dio_bi
                                         u64 file_offset, int skip_sum,
                                         int async_submit)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_dio_private *dip = bio->bi_private;
        bool write = bio_op(bio) == REQ_OP_WRITE;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;
  
        if (async_submit)
        bio_get(bio);
  
        if (!write) {
 -              ret = btrfs_bio_wq_end_io(root->fs_info, bio,
 -                              BTRFS_WQ_ENDIO_DATA);
 +              ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
                if (ret)
                        goto err;
        }
                goto map;
  
        if (write && async_submit) {
 -              ret = btrfs_wq_submit_bio(root->fs_info,
 -                                 inode, bio, 0, 0, file_offset,
 -                                 __btrfs_submit_bio_start_direct_io,
 -                                 __btrfs_submit_bio_done);
 +              ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0,
 +                                        file_offset,
 +                                        __btrfs_submit_bio_start_direct_io,
 +                                        __btrfs_submit_bio_done);
                goto err;
        } else if (write) {
                /*
                 * If we aren't doing async submit, calculate the csum of the
                 * bio now.
                 */
 -              ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
 +              ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
                if (ret)
                        goto err;
        } else {
 -              ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
 +              ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
                                                     file_offset);
                if (ret)
                        goto err;
        }
  map:
 -      ret = btrfs_map_bio(root, bio, 0, async_submit);
 +      ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
  err:
        bio_put(bio);
        return ret;
@@@ -8366,24 -8391,23 +8366,24 @@@ static int btrfs_submit_direct_hook(str
                                    int skip_sum)
  {
        struct inode *inode = dip->inode;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct bio *bio;
        struct bio *orig_bio = dip->orig_bio;
 -      struct bio_vec *bvec = orig_bio->bi_io_vec;
 +      struct bio_vec *bvec;
        u64 start_sector = orig_bio->bi_iter.bi_sector;
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
 -      u32 blocksize = root->sectorsize;
 +      u32 blocksize = fs_info->sectorsize;
        int async_submit = 0;
        int nr_sectors;
        int ret;
 -      int i;
 +      int i, j;
  
        map_length = orig_bio->bi_iter.bi_size;
 -      ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
 -                            start_sector << 9, &map_length, NULL, 0);
 +      ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
 +                            &map_length, NULL, 0);
        if (ret)
                return -EIO;
  
        if (!bio)
                return -ENOMEM;
  
 -      bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio));
 +      bio->bi_opf = orig_bio->bi_opf;
        bio->bi_private = dip;
        bio->bi_end_io = btrfs_end_dio_bio;
        btrfs_io_bio(bio)->logical = file_offset;
        atomic_inc(&dip->pending_bios);
  
 -      while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
 -              nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
 +      bio_for_each_segment_all(bvec, orig_bio, j) {
 +              nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
                i = 0;
  next_block:
                if (unlikely(map_length < submit_len + blocksize ||
                                                  start_sector, GFP_NOFS);
                        if (!bio)
                                goto out_err;
 -                      bio_set_op_attrs(bio, bio_op(orig_bio),
 -                                       bio_flags(orig_bio));
 +                      bio->bi_opf = orig_bio->bi_opf;
                        bio->bi_private = dip;
                        bio->bi_end_io = btrfs_end_dio_bio;
                        btrfs_io_bio(bio)->logical = file_offset;
  
                        map_length = orig_bio->bi_iter.bi_size;
 -                      ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
 +                      ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
                                              start_sector << 9,
                                              &map_length, NULL, 0);
                        if (ret) {
                                i++;
                                goto next_block;
                        }
 -                      bvec++;
                }
        }
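
btrfs_submit_direct_hook() also swaps the hand-rolled bvec pointer walk for the bio_for_each_segment_all() iterator and copies the request op and flags in one assignment through bi_opf instead of bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio)). The resulting loop skeleton:

        bio->bi_opf = orig_bio->bi_opf;
        bio_for_each_segment_all(bvec, orig_bio, j) {
                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
                /* split and submit block by block, as before */
        }
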
  
@@@ -8593,13 -8619,12 +8593,13 @@@ free_ordered
        kfree(dip);
  }
  
 -static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
 -                      const struct iov_iter *iter, loff_t offset)
 +static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
 +                             struct kiocb *iocb,
 +                             const struct iov_iter *iter, loff_t offset)
  {
        int seg;
        int i;
 -      unsigned blocksize_mask = root->sectorsize - 1;
 +      unsigned int blocksize_mask = fs_info->sectorsize - 1;
        ssize_t retval = -EINVAL;
  
        if (offset & blocksize_mask)
@@@ -8631,7 -8656,7 +8631,7 @@@ static ssize_t btrfs_direct_IO(struct k
  {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_dio_data dio_data = { 0 };
        loff_t offset = iocb->ki_pos;
        size_t count = 0;
        bool relock = false;
        ssize_t ret;
  
 -      if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
 +      if (check_direct_IO(fs_info, iocb, iter, offset))
                return 0;
  
        inode_dio_begin(inode);
                 * do the accounting properly if we go over the number we
                 * originally calculated.  Abuse current->journal_info for this.
                 */
 -              dio_data.reserve = round_up(count, root->sectorsize);
 +              dio_data.reserve = round_up(count,
 +                                          fs_info->sectorsize);
                dio_data.unsubmitted_oe_range_start = (u64)offset;
                dio_data.unsubmitted_oe_range_end = (u64)offset;
                current->journal_info = &dio_data;
        }
  
        ret = __blockdev_direct_IO(iocb, inode,
 -                                 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
 +                                 fs_info->fs_devices->latest_bdev,
                                   iter, btrfs_get_blocks_direct, NULL,
                                   btrfs_submit_direct, flags);
        if (iov_iter_rw(iter) == WRITE) {
@@@ -8952,7 -8976,7 +8952,7 @@@ int btrfs_page_mkwrite(struct vm_area_s
  {
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
 -      struct btrfs_root *root = BTRFS_I(inode)->root;
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
@@@ -9027,8 -9051,7 +9027,8 @@@ again
        }
  
        if (page->index == ((size - 1) >> PAGE_SHIFT)) {
 -              reserved_space = round_up(size - page_start, root->sectorsize);
 +              reserved_space = round_up(size - page_start,
 +                                        fs_info->sectorsize);
                if (reserved_space < PAGE_SIZE) {
                        end = page_start + reserved_space - 1;
                        spin_lock(&BTRFS_I(inode)->lock);
        set_page_dirty(page);
        SetPageUptodate(page);
  
 -      BTRFS_I(inode)->last_trans = root->fs_info->generation;
 +      BTRFS_I(inode)->last_trans = fs_info->generation;
        BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
        BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
  
@@@ -9098,14 -9121,13 +9098,14 @@@ out_noreserve
  
  static int btrfs_truncate(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *rsv;
        int ret = 0;
        int err = 0;
        struct btrfs_trans_handle *trans;
 -      u64 mask = root->sectorsize - 1;
 -      u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 +      u64 mask = fs_info->sectorsize - 1;
 +      u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
  
        ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
                                       (u64)-1);
         * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
         * updating the inode.
         */
 -      rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 +      rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
        if (!rsv)
                return -ENOMEM;
        rsv->size = min_size;
        }
  
        /* Migrate the slack space for the truncate to our reserve */
 -      ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
 +      ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
                                      min_size, 0);
        BUG_ON(ret);
  
                        break;
                }
  
 -              trans->block_rsv = &root->fs_info->trans_block_rsv;
 +              trans->block_rsv = &fs_info->trans_block_rsv;
                ret = btrfs_update_inode(trans, root, inode);
                if (ret) {
                        err = ret;
                        break;
                }
  
 -              btrfs_end_transaction(trans, root);
 -              btrfs_btree_balance_dirty(root);
 +              btrfs_end_transaction(trans);
 +              btrfs_btree_balance_dirty(fs_info);
  
                trans = btrfs_start_transaction(root, 2);
                if (IS_ERR(trans)) {
                        break;
                }
  
 -              ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
 +              ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, 0);
                BUG_ON(ret);    /* shouldn't happen */
                trans->block_rsv = rsv;
        }
  
        if (trans) {
 -              trans->block_rsv = &root->fs_info->trans_block_rsv;
 +              trans->block_rsv = &fs_info->trans_block_rsv;
                ret = btrfs_update_inode(trans, root, inode);
                if (ret && !err)
                        err = ret;
  
 -              ret = btrfs_end_transaction(trans, root);
 -              btrfs_btree_balance_dirty(root);
 +              ret = btrfs_end_transaction(trans);
 +              btrfs_btree_balance_dirty(fs_info);
        }
  out:
 -      btrfs_free_block_rsv(root, rsv);
 +      btrfs_free_block_rsv(fs_info, rsv);
  
        if (ret && !err)
                err = ret;
@@@ -9344,7 -9366,6 +9344,7 @@@ static void btrfs_i_callback(struct rcu
  
  void btrfs_destroy_inode(struct inode *inode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_extent *ordered;
        struct btrfs_root *root = BTRFS_I(inode)->root;
  
  
        if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
                     &BTRFS_I(inode)->runtime_flags)) {
 -              btrfs_info(root->fs_info, "inode %llu still on the orphan list",
 -                      btrfs_ino(inode));
 +              btrfs_info(fs_info, "inode %llu still on the orphan list",
 +                         btrfs_ino(inode));
                atomic_dec(&root->orphan_inodes);
        }
  
                if (!ordered)
                        break;
                else {
 -                      btrfs_err(root->fs_info,
 +                      btrfs_err(fs_info,
                                  "found ordered extent %llu %llu on inode cleanup",
                                  ordered->file_offset, ordered->len);
                        btrfs_remove_ordered_extent(inode, ordered);
@@@ -9488,7 -9509,6 +9488,7 @@@ static int btrfs_rename_exchange(struc
                              struct inode *new_dir,
                              struct dentry *new_dentry)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(old_dir)->root;
        struct btrfs_root *dest = BTRFS_I(new_dir)->root;
  
        /* close the race window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              down_read(&root->fs_info->subvol_sem);
 +              down_read(&fs_info->subvol_sem);
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              down_read(&dest->fs_info->subvol_sem);
 +              down_read(&fs_info->subvol_sem);
  
        /*
         * We want to reserve the absolute worst case amount of items.  So if
        /* Reference for the source. */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
                /* force full log commit if subvolume involved. */
 -              btrfs_set_log_full_commit(root->fs_info, trans);
 +              btrfs_set_log_full_commit(fs_info, trans);
        } else {
                btrfs_pin_log_trans(root);
                root_log_pinned = true;
        /* And now for the dest. */
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
                /* force full log commit if subvolume involved. */
 -              btrfs_set_log_full_commit(dest->fs_info, trans);
 +              btrfs_set_log_full_commit(fs_info, trans);
        } else {
                btrfs_pin_log_trans(dest);
                dest_log_pinned = true;
@@@ -9676,12 -9696,12 +9676,12 @@@ out_fail
         * allow the tasks to sync it.
         */
        if (ret && (root_log_pinned || dest_log_pinned)) {
 -              if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
 -                  btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
 -                  btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
 +              if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
 +                  btrfs_inode_in_log(new_dir, fs_info->generation) ||
 +                  btrfs_inode_in_log(old_inode, fs_info->generation) ||
                    (new_inode &&
 -                   btrfs_inode_in_log(new_inode, root->fs_info->generation)))
 -                  btrfs_set_log_full_commit(root->fs_info, trans);
 +                   btrfs_inode_in_log(new_inode, fs_info->generation)))
 +                      btrfs_set_log_full_commit(fs_info, trans);
  
                if (root_log_pinned) {
                        btrfs_end_log_trans(root);
                        dest_log_pinned = false;
                }
        }
 -      ret = btrfs_end_transaction(trans, root);
 +      ret = btrfs_end_transaction(trans);
  out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              up_read(&dest->fs_info->subvol_sem);
 +              up_read(&fs_info->subvol_sem);
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              up_read(&root->fs_info->subvol_sem);
 +              up_read(&fs_info->subvol_sem);
  
        return ret;
  }
@@@ -9757,7 -9777,6 +9757,7 @@@ static int btrfs_rename(struct inode *o
                           struct inode *new_dir, struct dentry *new_dentry,
                           unsigned int flags)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
        struct btrfs_trans_handle *trans;
        unsigned int trans_num_items;
        struct btrfs_root *root = BTRFS_I(old_dir)->root;
  
        /* close the racy window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              down_read(&root->fs_info->subvol_sem);
 +              down_read(&fs_info->subvol_sem);
        /*
         * We want to reserve the absolute worst case amount of items.  So if
         * both inodes are subvols and we need to unlink them then that would
        BTRFS_I(old_inode)->dir_index = 0ULL;
        if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                /* force full log commit if subvolume involved. */
 -              btrfs_set_log_full_commit(root->fs_info, trans);
 +              btrfs_set_log_full_commit(fs_info, trans);
        } else {
                btrfs_pin_log_trans(root);
                log_pinned = true;
@@@ -9952,20 -9971,20 +9952,20 @@@ out_fail
         * allow the tasks to sync it.
         */
        if (ret && log_pinned) {
 -              if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
 -                  btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
 -                  btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
 +              if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
 +                  btrfs_inode_in_log(new_dir, fs_info->generation) ||
 +                  btrfs_inode_in_log(old_inode, fs_info->generation) ||
                    (new_inode &&
 -                   btrfs_inode_in_log(new_inode, root->fs_info->generation)))
 -                  btrfs_set_log_full_commit(root->fs_info, trans);
 +                   btrfs_inode_in_log(new_inode, fs_info->generation)))
 +                      btrfs_set_log_full_commit(fs_info, trans);
  
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
  out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
 -              up_read(&root->fs_info->subvol_sem);
 +              up_read(&fs_info->subvol_sem);
  
        return ret;
  }
  
  int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
  {
 +      struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
  
 -      if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 +      if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;
  
        ret = __start_delalloc_inodes(root, delay_iput, -1);
         * we have to make sure the IO is actually started and that
         * ordered extents get created before we return
         */
 -      atomic_inc(&root->fs_info->async_submit_draining);
 -      while (atomic_read(&root->fs_info->nr_async_submits) ||
 -            atomic_read(&root->fs_info->async_delalloc_pages)) {
 -              wait_event(root->fs_info->async_submit_wait,
 -                 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
 -                  atomic_read(&root->fs_info->async_delalloc_pages) == 0));
 -      }
 -      atomic_dec(&root->fs_info->async_submit_draining);
 +      atomic_inc(&fs_info->async_submit_draining);
 +      while (atomic_read(&fs_info->nr_async_submits) ||
 +             atomic_read(&fs_info->async_delalloc_pages)) {
 +              wait_event(fs_info->async_submit_wait,
 +                         (atomic_read(&fs_info->nr_async_submits) == 0 &&
 +                          atomic_read(&fs_info->async_delalloc_pages) == 0));
 +      }
 +      atomic_dec(&fs_info->async_submit_draining);
        return ret;
  }
  
  static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
  
        name_len = strlen(symname);
 -      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
 +      if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
                return -ENAMETOOLONG;
  
        /*
        d_instantiate(dentry, inode);
  
  out_unlock:
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_btree_balance_dirty(fs_info);
        return err;
  
  out_unlock_inode:
@@@ -10314,7 -10331,6 +10314,7 @@@ static int __btrfs_prealloc_file_range(
                                       loff_t actual_len, u64 *alloc_hint,
                                       struct btrfs_trans_handle *trans)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
                                min_size, 0, *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
 -                              btrfs_end_transaction(trans, root);
 +                              btrfs_end_transaction(trans);
                        break;
                }
 -              btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
 +              btrfs_dec_block_group_reservations(fs_info, ins.objectid);
  
                last_alloc = ins.offset;
                ret = insert_reserved_file_extent(trans, inode,
                                                  ins.offset, 0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
 -                      btrfs_free_reserved_extent(root, ins.objectid,
 +                      btrfs_free_reserved_extent(fs_info, ins.objectid,
                                                   ins.offset, 0);
                        btrfs_abort_transaction(trans, ret);
                        if (own_trans)
 -                              btrfs_end_transaction(trans, root);
 +                              btrfs_end_transaction(trans);
                        break;
                }
  
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ins.offset;
 -              em->bdev = root->fs_info->fs_devices->latest_bdev;
 +              em->bdev = fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                em->generation = trans->transid;
  
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
                        if (own_trans)
 -                              btrfs_end_transaction(trans, root);
 +                              btrfs_end_transaction(trans);
                        break;
                }
  
                if (own_trans)
 -                      btrfs_end_transaction(trans, root);
 +                      btrfs_end_transaction(trans);
        }
        if (cur_offset < end)
                btrfs_free_reserved_data_space(inode, cur_offset,
@@@ -10480,7 -10496,6 +10480,7 @@@ static int btrfs_permission(struct inod
  
  static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
  {
 +      struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        mark_inode_dirty(inode);
  
  out:
 -      btrfs_end_transaction(trans, root);
 +      btrfs_end_transaction(trans);
        if (ret)
                iput(inode);
 -      btrfs_balance_delayed_items(root);
 -      btrfs_btree_balance_dirty(root);
 +      btrfs_balance_delayed_items(fs_info);
 +      btrfs_btree_balance_dirty(fs_info);
        return ret;
  
  out_inode:
@@@ -10653,7 -10668,6 +10653,6 @@@ static const struct inode_operations bt
        .update_time    = btrfs_update_time,
  };
  static const struct inode_operations btrfs_symlink_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = page_get_link,
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
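The btrfs hunks above all make the same two mechanical changes: a local fs_info pointer caches root->fs_info, and btrfs_end_transaction()/btrfs_btree_balance_dirty() lose their now-redundant root argument. A minimal sketch of the resulting calling convention, using a hypothetical helper that is not part of the patch:

	static int example_commit_and_flush(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root)
	{
		struct btrfs_fs_info *fs_info = root->fs_info;	/* cached once per function */
		int ret;

		ret = btrfs_end_transaction(trans);	/* was: btrfs_end_transaction(trans, root) */
		btrfs_btree_balance_dirty(fs_info);	/* was: btrfs_btree_balance_dirty(root) */

		return ret;
	}
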
diff --combined fs/ceph/inode.c
index 284f0d807151e4d28655e0087fa727164b701e64,9f125204c487c288a38871b5b77b980acffe684d..398e5328b30952410cc503e7e4d20918d3bdc671
@@@ -1023,17 -1023,16 +1023,17 @@@ static void update_dentry_lease(struct 
        long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;
  
 -      /* only track leases on regular dentries */
 -      if (dentry->d_op != &ceph_dentry_ops)
 -              return;
 -
        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
             dentry, duration, ttl);
  
        /* make lease_rdcache_gen match directory */
        dir = d_inode(dentry->d_parent);
 +
 +      /* only track leases on regular dentries */
 +      if (ceph_snap(dir) != CEPH_NOSNAP)
 +              goto out_unlock;
 +
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
  
        if (duration == 0)
@@@ -1203,7 -1202,12 +1203,7 @@@ retry_lookup
                                        err = -ENOMEM;
                                        goto done;
                                }
 -                              err = ceph_init_dentry(dn);
 -                              if (err < 0) {
 -                                      dput(dn);
 -                                      dput(parent);
 -                                      goto done;
 -                              }
 +                              err = 0;
                        } else if (d_really_is_positive(dn) &&
                                   (ceph_ino(d_inode(dn)) != vino.ino ||
                                    ceph_snap(d_inode(dn)) != vino.snap)) {
@@@ -1557,6 -1561,12 +1557,6 @@@ retry_lookup
                                err = -ENOMEM;
                                goto out;
                        }
 -                      ret = ceph_init_dentry(dn);
 -                      if (ret < 0) {
 -                              dput(dn);
 -                              err = ret;
 -                              goto out;
 -                      }
                } else if (d_really_is_positive(dn) &&
                           (ceph_ino(d_inode(dn)) != vino.ino ||
                            ceph_snap(d_inode(dn)) != vino.snap)) {
@@@ -1869,7 -1879,6 +1869,6 @@@ retry
   * symlinks
   */
  static const struct inode_operations ceph_symlink_iops = {
-       .readlink = generic_readlink,
        .get_link = simple_get_link,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
diff --combined fs/f2fs/namei.c
index db33b5631dc81d16a88efd4a4f4de3f809be65b9,fcb00e7c3054bb3efc5e841be0f7f3c7af6acf88..56c19b0610a899a6351f72a16e26b788f956e3d5
@@@ -778,7 -778,7 +778,7 @@@ static int f2fs_rename(struct inode *ol
        up_write(&F2FS_I(old_inode)->i_sem);
  
        old_inode->i_ctime = current_time(old_inode);
 -      f2fs_mark_inode_dirty_sync(old_inode);
 +      f2fs_mark_inode_dirty_sync(old_inode, false);
  
        f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
  
@@@ -938,7 -938,7 +938,7 @@@ static int f2fs_cross_rename(struct ino
                f2fs_i_links_write(old_dir, old_nlink > 0);
                up_write(&F2FS_I(old_dir)->i_sem);
        }
 -      f2fs_mark_inode_dirty_sync(old_dir);
 +      f2fs_mark_inode_dirty_sync(old_dir, false);
  
        /* update directory entry info of new dir inode */
        f2fs_set_link(new_dir, new_entry, new_page, old_inode);
                f2fs_i_links_write(new_dir, new_nlink > 0);
                up_write(&F2FS_I(new_dir)->i_sem);
        }
 -      f2fs_mark_inode_dirty_sync(new_dir);
 +      f2fs_mark_inode_dirty_sync(new_dir, false);
  
        f2fs_unlock_op(sbi);
  
@@@ -1075,7 -1075,6 +1075,6 @@@ errout
  }
  
  const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = f2fs_encrypted_get_link,
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
@@@ -1105,7 -1104,6 +1104,6 @@@ const struct inode_operations f2fs_dir_
  };
  
  const struct inode_operations f2fs_symlink_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = f2fs_get_link,
        .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
diff --combined fs/fuse/dir.c
index 096f79997f75adf9603c4c6367635588535a8ae3,0c07d0c651de0406fcdfba506d37cb1bd6db5848..1f7c732f32b07f1bab9e4961f16cb52ee9f09f70
@@@ -1739,6 -1739,8 +1739,6 @@@ static int fuse_setattr(struct dentry *
                 * This should be done on write(), truncate() and chown().
                 */
                if (!fc->handle_killpriv) {
 -                      int kill;
 -
                        /*
                         * ia_mode calculation may have used stale i_mode.
                         * Refresh and recalculate.
                                return ret;
  
                        attr->ia_mode = inode->i_mode;
 -                      kill = should_remove_suid(entry);
 -                      if (kill & ATTR_KILL_SUID) {
 +                      if (inode->i_mode & S_ISUID) {
                                attr->ia_valid |= ATTR_MODE;
                                attr->ia_mode &= ~S_ISUID;
                        }
 -                      if (kill & ATTR_KILL_SGID) {
 +                      if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                                attr->ia_valid |= ATTR_MODE;
                                attr->ia_mode &= ~S_ISGID;
                        }
@@@ -1831,7 -1834,6 +1831,6 @@@ static const struct inode_operations fu
  static const struct inode_operations fuse_symlink_inode_operations = {
        .setattr        = fuse_setattr,
        .get_link       = fuse_get_link,
-       .readlink       = generic_readlink,
        .getattr        = fuse_getattr,
        .listxattr      = fuse_listxattr,
  };
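The fuse_setattr() hunk above stops relying on should_remove_suid() and instead re-applies the kill-priv rule directly on the refreshed i_mode: setuid is always stripped, setgid only when the group-execute bit is set as well. A small sketch of just that rule, with a hypothetical helper name:

	static void example_strip_priv_bits(umode_t mode, struct iattr *attr)
	{
		attr->ia_mode = mode;
		if (mode & S_ISUID) {
			/* always drop setuid */
			attr->ia_valid |= ATTR_MODE;
			attr->ia_mode &= ~S_ISUID;
		}
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			/* drop setgid only when it is actually effective (group exec set) */
			attr->ia_valid |= ATTR_MODE;
			attr->ia_mode &= ~S_ISGID;
		}
	}
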
diff --combined fs/libfs.c
index 76048705d9220d3234f36ad151dd93d81ef839f6,06e7e0c918e1fae815ba80beabd03159402ee4d4..6637aa60c1dac94cea638cc58b1b700bbb25b9c9
@@@ -465,8 -465,6 +465,8 @@@ EXPORT_SYMBOL(simple_write_begin)
   * is not called, so a filesystem that actually does store data in .write_inode
   * should extend on what's done here with a call to mark_inode_dirty() in the
   * case that i_size has changed.
 + *
 + * Use *ONLY* with simple_readpage()
   */
  int simple_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
        loff_t last_pos = pos + copied;
  
        /* zero the stale part of the page if we did a short copy */
 -      if (copied < len) {
 -              unsigned from = pos & (PAGE_SIZE - 1);
 -
 -              zero_user(page, from + copied, len - copied);
 -      }
 +      if (!PageUptodate(page)) {
 +              if (copied < len) {
 +                      unsigned from = pos & (PAGE_SIZE - 1);
  
 -      if (!PageUptodate(page))
 +                      zero_user(page, from + copied, len - copied);
 +              }
                SetPageUptodate(page);
 +      }
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold the i_mutex.
@@@ -1131,7 -1129,6 +1131,6 @@@ EXPORT_SYMBOL(simple_get_link)
  
  const struct inode_operations simple_symlink_inode_operations = {
        .get_link = simple_get_link,
-       .readlink = generic_readlink
  };
  EXPORT_SYMBOL(simple_symlink_inode_operations);
  
diff --combined fs/namei.c
index 1c372debcbbe62c57d922448b86ee967e18d6584,c248a9e1edd23f79a8d8cf8e9fb0a011cba5a5d2..d9fc7617b9e48a9225f5a97a752ed3bd5db7b869
@@@ -1200,7 -1200,7 +1200,7 @@@ static int follow_managed(struct path *
                if (managed & DCACHE_MANAGE_TRANSIT) {
                        BUG_ON(!path->dentry->d_op);
                        BUG_ON(!path->dentry->d_op->d_manage);
 -                      ret = path->dentry->d_op->d_manage(path->dentry, false);
 +                      ret = path->dentry->d_op->d_manage(path, false);
                        if (ret < 0)
                                break;
                }
@@@ -1263,10 -1263,10 +1263,10 @@@ int follow_down_one(struct path *path
  }
  EXPORT_SYMBOL(follow_down_one);
  
 -static inline int managed_dentry_rcu(struct dentry *dentry)
 +static inline int managed_dentry_rcu(const struct path *path)
  {
 -      return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
 -              dentry->d_op->d_manage(dentry, true) : 0;
 +      return (path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
 +              path->dentry->d_op->d_manage(path, true) : 0;
  }
  
  /*
@@@ -1282,7 -1282,7 +1282,7 @@@ static bool __follow_mount_rcu(struct n
                 * Don't forget we might have a non-mountpoint managed dentry
                 * that wants to block transit.
                 */
 -              switch (managed_dentry_rcu(path->dentry)) {
 +              switch (managed_dentry_rcu(path)) {
                case -ECHILD:
                default:
                        return false;
@@@ -1392,7 -1392,8 +1392,7 @@@ int follow_down(struct path *path
                if (managed & DCACHE_MANAGE_TRANSIT) {
                        BUG_ON(!path->dentry->d_op);
                        BUG_ON(!path->dentry->d_op->d_manage);
 -                      ret = path->dentry->d_op->d_manage(
 -                              path->dentry, false);
 +                      ret = path->dentry->d_op->d_manage(path, false);
                        if (ret < 0)
                                return ret == -EISDIR ? 0 : ret;
                }
@@@ -1724,35 -1725,30 +1724,35 @@@ static int pick_link(struct nameidata *
        return 1;
  }
  
 +enum {WALK_FOLLOW = 1, WALK_MORE = 2};
 +
  /*
   * Do we need to follow links? We _really_ want to be able
   * to do this check without having to look at inode->i_op,
   * so we keep a cache of "no, this doesn't need follow_link"
   * for the common case.
   */
 -static inline int should_follow_link(struct nameidata *nd, struct path *link,
 -                                   int follow,
 -                                   struct inode *inode, unsigned seq)
 +static inline int step_into(struct nameidata *nd, struct path *path,
 +                          int flags, struct inode *inode, unsigned seq)
  {
 -      if (likely(!d_is_symlink(link->dentry)))
 -              return 0;
 -      if (!follow)
 +      if (!(flags & WALK_MORE) && nd->depth)
 +              put_link(nd);
 +      if (likely(!d_is_symlink(path->dentry)) ||
 +         !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
 +              /* not a symlink or should not follow */
 +              path_to_nameidata(path, nd);
 +              nd->inode = inode;
 +              nd->seq = seq;
                return 0;
 +      }
        /* make sure that d_is_symlink above matches inode */
        if (nd->flags & LOOKUP_RCU) {
 -              if (read_seqcount_retry(&link->dentry->d_seq, seq))
 +              if (read_seqcount_retry(&path->dentry->d_seq, seq))
                        return -ECHILD;
        }
 -      return pick_link(nd, link, inode, seq);
 +      return pick_link(nd, path, inode, seq);
  }
  
 -enum {WALK_GET = 1, WALK_PUT = 2};
 -
  static int walk_component(struct nameidata *nd, int flags)
  {
        struct path path;
         */
        if (unlikely(nd->last_type != LAST_NORM)) {
                err = handle_dots(nd, nd->last_type);
 -              if (flags & WALK_PUT)
 +              if (!(flags & WALK_MORE) && nd->depth)
                        put_link(nd);
                return err;
        }
                inode = d_backing_inode(path.dentry);
        }
  
 -      if (flags & WALK_PUT)
 -              put_link(nd);
 -      err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
 -      if (unlikely(err))
 -              return err;
 -      path_to_nameidata(&path, nd);
 -      nd->inode = inode;
 -      nd->seq = seq;
 -      return 0;
 +      return step_into(nd, &path, flags, inode, seq);
  }
  
  /*
                        if (!name)
                                return 0;
                        /* last component of nested symlink */
 -                      err = walk_component(nd, WALK_GET | WALK_PUT);
 +                      err = walk_component(nd, WALK_FOLLOW);
                } else {
 -                      err = walk_component(nd, WALK_GET);
 +                      /* not the last component */
 +                      err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
                }
                if (err < 0)
                        return err;
@@@ -2245,7 -2248,12 +2245,7 @@@ static inline int lookup_last(struct na
                nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
  
        nd->flags &= ~LOOKUP_PARENT;
 -      return walk_component(nd,
 -                      nd->flags & LOOKUP_FOLLOW
 -                              ? nd->depth
 -                                      ? WALK_PUT | WALK_GET
 -                                      : WALK_GET
 -                              : 0);
 +      return walk_component(nd, 0);
  }
  
  /* Returns 0 and nd will be valid on success; Returns error, otherwise. */
@@@ -2550,9 -2558,28 +2550,9 @@@ int user_path_at_empty(int dfd, const c
  }
  EXPORT_SYMBOL(user_path_at_empty);
  
 -/*
 - * NB: most callers don't do anything directly with the reference to the
 - *     to struct filename, but the nd->last pointer points into the name string
 - *     allocated by getname. So we must hold the reference to it until all
 - *     path-walking is complete.
 - */
 -static inline struct filename *
 -user_path_parent(int dfd, const char __user *path,
 -               struct path *parent,
 -               struct qstr *last,
 -               int *type,
 -               unsigned int flags)
 -{
 -      /* only LOOKUP_REVAL is allowed in extra flags */
 -      return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
 -                               parent, last, type);
 -}
 -
  /**
   * mountpoint_last - look up last component for umount
   * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
 - * @path: pointer to container for result
   *
   * This is a special lookup_last function just for umount. In this case, we
   * need to resolve the path without doing any revalidation.
   *
   * Returns:
   * -error: if there was an error during lookup. This includes -ENOENT if the
 - *         lookup found a negative dentry. The nd->path reference will also be
 - *         put in this case.
 + *         lookup found a negative dentry.
   *
 - * 0:      if we successfully resolved nd->path and found it to not to be a
 - *         symlink that needs to be followed. "path" will also be populated.
 - *         The nd->path reference will also be put.
 + * 0:      if we successfully resolved nd->last and found it to not to be a
 + *         symlink that needs to be followed.
   *
   * 1:      if we successfully resolved nd->last and found it to be a symlink
 - *         that needs to be followed. "path" will be populated with the path
 - *         to the link, and nd->path will *not* be put.
 + *         that needs to be followed.
   */
  static int
 -mountpoint_last(struct nameidata *nd, struct path *path)
 +mountpoint_last(struct nameidata *nd)
  {
        int error = 0;
 -      struct dentry *dentry;
        struct dentry *dir = nd->path.dentry;
 +      struct path path;
  
        /* If we're in rcuwalk, drop out of it to handle last component */
        if (nd->flags & LOOKUP_RCU) {
                error = handle_dots(nd, nd->last_type);
                if (error)
                        return error;
 -              dentry = dget(nd->path.dentry);
 +              path.dentry = dget(nd->path.dentry);
        } else {
 -              dentry = d_lookup(dir, &nd->last);
 -              if (!dentry) {
 +              path.dentry = d_lookup(dir, &nd->last);
 +              if (!path.dentry) {
                        /*
                         * No cached dentry. Mounted dentries are pinned in the
                         * cache, so that means that this dentry is probably
                         * a symlink or the path doesn't actually point
                         * to a mounted dentry.
                         */
 -                      dentry = lookup_slow(&nd->last, dir,
 +                      path.dentry = lookup_slow(&nd->last, dir,
                                             nd->flags | LOOKUP_NO_REVAL);
 -                      if (IS_ERR(dentry))
 -                              return PTR_ERR(dentry);
 +                      if (IS_ERR(path.dentry))
 +                              return PTR_ERR(path.dentry);
                }
        }
 -      if (d_is_negative(dentry)) {
 -              dput(dentry);
 +      if (d_is_negative(path.dentry)) {
 +              dput(path.dentry);
                return -ENOENT;
        }
 -      if (nd->depth)
 -              put_link(nd);
 -      path->dentry = dentry;
 -      path->mnt = nd->path.mnt;
 -      error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
 -                                 d_backing_inode(dentry), 0);
 -      if (unlikely(error))
 -              return error;
 -      mntget(path->mnt);
 -      follow_mount(path);
 -      return 0;
 +      path.mnt = nd->path.mnt;
 +      return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
  }
  
  /**
@@@ -2633,19 -2672,13 +2633,19 @@@ path_mountpoint(struct nameidata *nd, u
        if (IS_ERR(s))
                return PTR_ERR(s);
        while (!(err = link_path_walk(s, nd)) &&
 -              (err = mountpoint_last(nd, path)) > 0) {
 +              (err = mountpoint_last(nd)) > 0) {
                s = trailing_symlink(nd);
                if (IS_ERR(s)) {
                        err = PTR_ERR(s);
                        break;
                }
        }
 +      if (!err) {
 +              *path = nd->path;
 +              nd->path.mnt = NULL;
 +              nd->path.dentry = NULL;
 +              follow_mount(path);
 +      }
        terminate_walk(nd);
        return err;
  }
@@@ -2862,7 -2895,7 +2862,7 @@@ bool may_open_dev(const struct path *pa
                !(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
  }
  
 -static int may_open(struct path *path, int acc_mode, int flag)
 +static int may_open(const struct path *path, int acc_mode, int flag)
  {
        struct dentry *dentry = path->dentry;
        struct inode *inode = dentry->d_inode;
  
  static int handle_truncate(struct file *filp)
  {
 -      struct path *path = &filp->f_path;
 +      const struct path *path = &filp->f_path;
        struct inode *inode = path->dentry->d_inode;
        int error = get_write_access(inode);
        if (error)
@@@ -3302,11 -3335,18 +3302,11 @@@ static int do_last(struct nameidata *nd
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        inode = d_backing_inode(path.dentry);
  finish_lookup:
 -      if (nd->depth)
 -              put_link(nd);
 -      error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
 -                                 inode, seq);
 +      error = step_into(nd, &path, 0, inode, seq);
        if (unlikely(error))
                return error;
 -
 -      path_to_nameidata(&path, nd);
 -      nd->inode = inode;
 -      nd->seq = seq;
 -      /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
  finish_open:
 +      /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
        error = complete_walk(nd);
        if (error)
                return error;
@@@ -3821,8 -3861,8 +3821,8 @@@ static long do_rmdir(int dfd, const cha
        int type;
        unsigned int lookup_flags = 0;
  retry:
 -      name = user_path_parent(dfd, pathname,
 -                              &path, &last, &type, lookup_flags);
 +      name = filename_parentat(dfd, getname(pathname), lookup_flags,
 +                              &path, &last, &type);
        if (IS_ERR(name))
                return PTR_ERR(name);
  
@@@ -3951,8 -3991,8 +3951,8 @@@ static long do_unlinkat(int dfd, const 
        struct inode *delegated_inode = NULL;
        unsigned int lookup_flags = 0;
  retry:
 -      name = user_path_parent(dfd, pathname,
 -                              &path, &last, &type, lookup_flags);
 +      name = filename_parentat(dfd, getname(pathname), lookup_flags,
 +                              &path, &last, &type);
        if (IS_ERR(name))
                return PTR_ERR(name);
  
@@@ -4305,7 -4345,11 +4305,7 @@@ int vfs_rename(struct inode *old_dir, s
        bool new_is_dir = false;
        unsigned max_links = new_dir->i_sb->s_max_links;
  
 -      /*
 -       * Check source == target.
 -       * On overlayfs need to look at underlying inodes.
 -       */
 -      if (d_real_inode(old_dentry) == d_real_inode(new_dentry))
 +      if (source == target)
                return 0;
  
        error = may_delete(old_dir, old_dentry, is_dir);
@@@ -4447,15 -4491,15 +4447,15 @@@ SYSCALL_DEFINE5(renameat2, int, olddfd
                target_flags = 0;
  
  retry:
 -      from = user_path_parent(olddfd, oldname,
 -                              &old_path, &old_last, &old_type, lookup_flags);
 +      from = filename_parentat(olddfd, getname(oldname), lookup_flags,
 +                              &old_path, &old_last, &old_type);
        if (IS_ERR(from)) {
                error = PTR_ERR(from);
                goto exit;
        }
  
 -      to = user_path_parent(newdfd, newname,
 -                              &new_path, &new_last, &new_type, lookup_flags);
 +      to = filename_parentat(newdfd, getname(newname), lookup_flags,
 +                              &new_path, &new_last, &new_type);
        if (IS_ERR(to)) {
                error = PTR_ERR(to);
                goto exit1;
@@@ -4606,7 -4650,8 +4606,8 @@@ out
   * have ->get_link() not calling nd_jump_link().  Using (or not using) it
   * for any given inode is up to filesystem.
   */
- int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+ static int generic_readlink(struct dentry *dentry, char __user *buffer,
+                           int buflen)
  {
        DEFINE_DELAYED_CALL(done);
        struct inode *inode = d_inode(dentry);
        do_delayed_call(&done);
        return res;
  }
- EXPORT_SYMBOL(generic_readlink);
+ /**
+  * vfs_readlink - copy symlink body into userspace buffer
+  * @dentry: dentry on which to get symbolic link
+  * @buffer: user memory pointer
+  * @buflen: size of buffer
+  *
+  * Does not touch atime.  That's up to the caller if necessary
+  *
+  * Does not call security hook.
+  */
+ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+ {
+       struct inode *inode = d_inode(dentry);
+       if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
+               if (unlikely(inode->i_op->readlink))
+                       return inode->i_op->readlink(dentry, buffer, buflen);
+               if (!d_is_symlink(dentry))
+                       return -EINVAL;
+               spin_lock(&inode->i_lock);
+               inode->i_opflags |= IOP_DEFAULT_READLINK;
+               spin_unlock(&inode->i_lock);
+       }
+       return generic_readlink(dentry, buffer, buflen);
+ }
+ EXPORT_SYMBOL(vfs_readlink);
  
  /**
   * vfs_get_link - get symlink body
@@@ -4739,7 -4813,6 +4769,6 @@@ int page_symlink(struct inode *inode, c
  EXPORT_SYMBOL(page_symlink);
  
  const struct inode_operations page_symlink_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = page_get_link,
  };
  EXPORT_SYMBOL(page_symlink_inode_operations);
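The new vfs_readlink() above prefers a filesystem-supplied ->readlink override, rejects non-symlinks with -EINVAL, caches the decision in IOP_DEFAULT_READLINK, and otherwise falls through to the now-static generic_readlink(). A hedged sketch of an in-kernel caller with a kernel-space buffer, using the same set_fs(KERNEL_DS) pattern as the nfsd hunk below; the function name is hypothetical:

	static int example_read_symlink(struct dentry *dentry, char *kbuf, int buflen)
	{
		mm_segment_t oldfs;
		int len;

		if (!d_is_symlink(dentry))
			return -EINVAL;		/* same check the converted callers add */

		oldfs = get_fs();
		set_fs(KERNEL_DS);		/* kbuf lives in kernel space */
		len = vfs_readlink(dentry, (char __user *)kbuf, buflen);
		set_fs(oldfs);

		return len;			/* bytes copied, or -errno */
	}
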
diff --combined fs/nfsd/nfs4xdr.c
index 79edde4577b29457767a3cb85c1714350184513d,645e1e3c31106c2f5cfe24bb763ccdd24aee1950..7ecf16be4a444ec250678e89ad210c7f19cbbc18
@@@ -33,7 -33,6 +33,7 @@@
   *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
  
 +#include <linux/fs_struct.h>
  #include <linux/file.h>
  #include <linux/slab.h>
  #include <linux/namei.h>
  
  #define NFSDDBG_FACILITY              NFSDDBG_XDR
  
 +u32 nfsd_suppattrs[3][3] = {
 +      {NFSD4_SUPPORTED_ATTRS_WORD0,
 +       NFSD4_SUPPORTED_ATTRS_WORD1,
 +       NFSD4_SUPPORTED_ATTRS_WORD2},
 +
 +      {NFSD4_1_SUPPORTED_ATTRS_WORD0,
 +       NFSD4_1_SUPPORTED_ATTRS_WORD1,
 +       NFSD4_1_SUPPORTED_ATTRS_WORD2},
 +
 +      {NFSD4_1_SUPPORTED_ATTRS_WORD0,
 +       NFSD4_1_SUPPORTED_ATTRS_WORD1,
 +       NFSD4_2_SUPPORTED_ATTRS_WORD2},
 +};
 +
  /*
   * As per referral draft, the fsid for a referral MUST be different from the fsid of the containing
   * directory in order to indicate to the client that a filesystem boundary is present
@@@ -300,7 -285,7 +300,7 @@@ nfsd4_decode_bitmap(struct nfsd4_compou
  static __be32
  nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
                   struct iattr *iattr, struct nfs4_acl **acl,
 -                 struct xdr_netobj *label)
 +                 struct xdr_netobj *label, int *umask)
  {
        int expected_len, len = 0;
        u32 dummy32;
        if ((status = nfsd4_decode_bitmap(argp, bmval)))
                return status;
  
 +      if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
 +          || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
 +          || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2) {
 +              if (nfsd_attrs_supported(argp->minorversion, bmval))
 +                      return nfserr_inval;
 +              return nfserr_attrnotsupp;
 +      }
 +
        READ_BUF(4);
        expected_len = be32_to_cpup(p++);
  
                        return nfserr_jukebox;
        }
  #endif
 -
 -      if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
 -          || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
 -          || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
 -              READ_BUF(expected_len - len);
 -      else if (len != expected_len)
 +      if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
 +              if (!umask)
 +                      goto xdr_error;
 +              READ_BUF(8);
 +              len += 8;
 +              dummy32 = be32_to_cpup(p++);
 +              iattr->ia_mode = dummy32 & (S_IFMT | S_IALLUGO);
 +              dummy32 = be32_to_cpup(p++);
 +              *umask = dummy32 & S_IRWXUGO;
 +              iattr->ia_valid |= ATTR_MODE;
 +      }
 +      if (len != expected_len)
                goto xdr_error;
  
        DECODE_TAIL;
@@@ -663,8 -634,7 +663,8 @@@ nfsd4_decode_create(struct nfsd4_compou
                return status;
  
        status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
 -                                  &create->cr_acl, &create->cr_label);
 +                                  &create->cr_acl, &create->cr_label,
 +                                  &current->fs->umask);
        if (status)
                goto out;
  
@@@ -909,15 -879,13 +909,15 @@@ nfsd4_decode_open(struct nfsd4_compound
        case NFS4_OPEN_NOCREATE:
                break;
        case NFS4_OPEN_CREATE:
 +              current->fs->umask = 0;
                READ_BUF(4);
                open->op_createmode = be32_to_cpup(p++);
                switch (open->op_createmode) {
                case NFS4_CREATE_UNCHECKED:
                case NFS4_CREATE_GUARDED:
                        status = nfsd4_decode_fattr(argp, open->op_bmval,
 -                              &open->op_iattr, &open->op_acl, &open->op_label);
 +                              &open->op_iattr, &open->op_acl, &open->op_label,
 +                              &current->fs->umask);
                        if (status)
                                goto out;
                        break;
                        READ_BUF(NFS4_VERIFIER_SIZE);
                        COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
                        status = nfsd4_decode_fattr(argp, open->op_bmval,
 -                              &open->op_iattr, &open->op_acl, &open->op_label);
 +                              &open->op_iattr, &open->op_acl, &open->op_label,
 +                              &current->fs->umask);
                        if (status)
                                goto out;
                        break;
@@@ -1169,7 -1136,7 +1169,7 @@@ nfsd4_decode_setattr(struct nfsd4_compo
        if (status)
                return status;
        return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
 -                                &setattr->sa_acl, &setattr->sa_label);
 +                                &setattr->sa_acl, &setattr->sa_label, NULL);
  }
  
  static __be32
@@@ -2373,7 -2340,9 +2373,7 @@@ nfsd4_encode_fattr(struct xdr_stream *x
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
  
        BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
 -      BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
 -      BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
 -      BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 +      BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
  
        if (exp->ex_fslocs.migrated) {
                status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
        p++;                /* to be backfilled later */
  
        if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
 -              u32 word0 = nfsd_suppattrs0(minorversion);
 -              u32 word1 = nfsd_suppattrs1(minorversion);
 -              u32 word2 = nfsd_suppattrs2(minorversion);
 +              u32 *supp = nfsd_suppattrs[minorversion];
  
                if (!IS_POSIXACL(dentry->d_inode))
 -                      word0 &= ~FATTR4_WORD0_ACL;
 +                      supp[0] &= ~FATTR4_WORD0_ACL;
                if (!contextsupport)
 -                      word2 &= ~FATTR4_WORD2_SECURITY_LABEL;
 -              if (!word2) {
 +                      supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
 +              if (!supp[2]) {
                        p = xdr_reserve_space(xdr, 12);
                        if (!p)
                                goto out_resource;
                        *p++ = cpu_to_be32(2);
 -                      *p++ = cpu_to_be32(word0);
 -                      *p++ = cpu_to_be32(word1);
 +                      *p++ = cpu_to_be32(supp[0]);
 +                      *p++ = cpu_to_be32(supp[1]);
                } else {
                        p = xdr_reserve_space(xdr, 16);
                        if (!p)
                                goto out_resource;
                        *p++ = cpu_to_be32(3);
 -                      *p++ = cpu_to_be32(word0);
 -                      *p++ = cpu_to_be32(word1);
 -                      *p++ = cpu_to_be32(word2);
 +                      *p++ = cpu_to_be32(supp[0]);
 +                      *p++ = cpu_to_be32(supp[1]);
 +                      *p++ = cpu_to_be32(supp[2]);
                }
        }
        if (bmval0 & FATTR4_WORD0_TYPE) {
@@@ -3605,10 -3576,10 +3605,10 @@@ nfsd4_encode_readlink(struct nfsd4_comp
        if (!p)
                return nfserr_resource;
        /*
-        * XXX: By default, the ->readlink() VFS op will truncate symlinks
-        * if they would overflow the buffer.  Is this kosher in NFSv4?  If
-        * not, one easy fix is: if ->readlink() precisely fills the buffer,
-        * assume that truncation occurred, and return NFS4ERR_RESOURCE.
+        * XXX: By default, vfs_readlink() will truncate symlinks if they
+        * would overflow the buffer.  Is this kosher in NFSv4?  If not, one
+        * easy fix is: if vfs_readlink() precisely fills the buffer, assume
+        * that truncation occurred, and return NFS4ERR_RESOURCE.
         */
        nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp,
                                                (char *)p, &maxcount);
diff --combined fs/nfsd/vfs.c
index 357e844aee8440c7969dee0359cee8ff71fc223c,b854f02c1c36a4eccb7ccee83c1d855bdfd54954..7a21abe7caf7623e8354dc56a1057d9469e22d39
@@@ -509,7 -509,8 +509,7 @@@ __be32 nfsd4_set_nfs4_label(struct svc_
  __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
                u64 dst_pos, u64 count)
  {
 -      return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
 -                      count));
 +      return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
  }
  
  ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
@@@ -1450,7 -1451,6 +1450,6 @@@ do_nfsd_create(struct svc_rqst *rqstp, 
  __be32
  nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
  {
-       struct inode    *inode;
        mm_segment_t    oldfs;
        __be32          err;
        int             host_err;
  
        path.mnt = fhp->fh_export->ex_path.mnt;
        path.dentry = fhp->fh_dentry;
-       inode = d_inode(path.dentry);
  
        err = nfserr_inval;
-       if (!inode->i_op->readlink)
+       if (!d_is_symlink(path.dentry))
                goto out;
  
        touch_atime(&path);
         */
  
        oldfs = get_fs(); set_fs(KERNEL_DS);
-       host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
+       host_err = vfs_readlink(path.dentry, (char __user *)buf, *lenp);
        set_fs(oldfs);
  
        if (host_err < 0)
diff --combined fs/overlayfs/inode.c
index 1ab8b0dbc23788f10c61e0e734e10f42cd717545,19e0089c3dbf5e8559f9c470d6db192fb2472cf4..08643ac44a0278ed04be96d6df267e3b13821692
  #include <linux/posix_acl.h>
  #include "overlayfs.h"
  
 -static int ovl_copy_up_truncate(struct dentry *dentry)
 -{
 -      int err;
 -      struct dentry *parent;
 -      struct kstat stat;
 -      struct path lowerpath;
 -      const struct cred *old_cred;
 -
 -      parent = dget_parent(dentry);
 -      err = ovl_copy_up(parent);
 -      if (err)
 -              goto out_dput_parent;
 -
 -      ovl_path_lower(dentry, &lowerpath);
 -
 -      old_cred = ovl_override_creds(dentry->d_sb);
 -      err = vfs_getattr(&lowerpath, &stat);
 -      if (!err) {
 -              stat.size = 0;
 -              err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
 -      }
 -      revert_creds(old_cred);
 -
 -out_dput_parent:
 -      dput(parent);
 -      return err;
 -}
 -
  int ovl_setattr(struct dentry *dentry, struct iattr *attr)
  {
        int err;
        if (err)
                goto out;
  
 -      if (attr->ia_valid & ATTR_SIZE) {
 -              struct inode *realinode = d_inode(ovl_dentry_real(dentry));
 -
 -              err = -ETXTBSY;
 -              if (atomic_read(&realinode->i_writecount) < 0)
 -                      goto out_drop_write;
 -      }
 -
        err = ovl_copy_up(dentry);
        if (!err) {
 -              struct inode *winode = NULL;
 -
                upperdentry = ovl_dentry_upper(dentry);
  
 -              if (attr->ia_valid & ATTR_SIZE) {
 -                      winode = d_inode(upperdentry);
 -                      err = get_write_access(winode);
 -                      if (err)
 -                              goto out_drop_write;
 -              }
 -
                if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
                        attr->ia_valid &= ~ATTR_MODE;
  
                if (!err)
                        ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
 -
 -              if (winode)
 -                      put_write_access(winode);
        }
 -out_drop_write:
        ovl_drop_write(dentry);
  out:
        return err;
@@@ -253,7 -302,10 +253,7 @@@ int ovl_open_maybe_copy_up(struct dentr
        if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
                err = ovl_want_write(dentry);
                if (!err) {
 -                      if (file_flags & O_TRUNC)
 -                              err = ovl_copy_up_truncate(dentry);
 -                      else
 -                              err = ovl_copy_up(dentry);
 +                      err = ovl_copy_up_flags(dentry, file_flags);
                        ovl_drop_write(dentry);
                }
        }
@@@ -296,13 -348,12 +296,12 @@@ static const struct inode_operations ov
  static const struct inode_operations ovl_symlink_inode_operations = {
        .setattr        = ovl_setattr,
        .get_link       = ovl_get_link,
-       .readlink       = generic_readlink,
        .getattr        = ovl_getattr,
        .listxattr      = ovl_listxattr,
        .update_time    = ovl_update_time,
  };
  
 -static void ovl_fill_inode(struct inode *inode, umode_t mode)
 +static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
  {
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
  #endif
  
 -      mode &= S_IFMT;
 -      switch (mode) {
 +      switch (mode & S_IFMT) {
 +      case S_IFREG:
 +              inode->i_op = &ovl_file_inode_operations;
 +              break;
 +
        case S_IFDIR:
                inode->i_op = &ovl_dir_inode_operations;
                inode->i_fop = &ovl_dir_operations;
                break;
  
        default:
 -              WARN(1, "illegal file type: %i\n", mode);
 -              /* Fall through */
 -
 -      case S_IFREG:
 -      case S_IFSOCK:
 -      case S_IFBLK:
 -      case S_IFCHR:
 -      case S_IFIFO:
                inode->i_op = &ovl_file_inode_operations;
 +              init_special_inode(inode, mode, rdev);
                break;
        }
  }
  
 -struct inode *ovl_new_inode(struct super_block *sb, umode_t mode)
 +struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
  {
        struct inode *inode;
  
        inode = new_inode(sb);
        if (inode)
 -              ovl_fill_inode(inode, mode);
 +              ovl_fill_inode(inode, mode, rdev);
  
        return inode;
  }
@@@ -362,7 -417,7 +361,7 @@@ struct inode *ovl_get_inode(struct supe
        inode = iget5_locked(sb, (unsigned long) realinode,
                             ovl_inode_test, ovl_inode_set, realinode);
        if (inode && inode->i_state & I_NEW) {
 -              ovl_fill_inode(inode, realinode->i_mode);
 +              ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
                set_nlink(inode, realinode->i_nlink);
                unlock_new_inode(inode);
        }
diff --combined fs/proc/inode.c
index 783bc19644d18d2b5b64de10f10169a6e957cc28,827c0df5baa6b32bfecec7886a50647da578d25e..873300164dc6b1413da81a6e4d6d56d2a4e2843d
@@@ -138,16 -138,6 +138,16 @@@ static void unuse_pde(struct proc_dir_e
  /* pde is locked */
  static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
  {
 +      /*
 +       * close() (proc_reg_release()) can't delete an entry and proceed:
 +       * ->release hook needs to be available at the right moment.
 +       *
 +       * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
 +       * "struct file" needs to be available at the right moment.
 +       *
 +       * Therefore, first process to enter this function does ->release() and
 +       * signals its completion to the other process which does nothing.
 +       */
        if (pdeo->closing) {
                /* somebody else is doing that, just wait */
                DECLARE_COMPLETION_ONSTACK(c);
                spin_lock(&pde->pde_unload_lock);
        } else {
                struct file *file;
 -              pdeo->closing = 1;
 +              pdeo->closing = true;
                spin_unlock(&pde->pde_unload_lock);
                file = pdeo->file;
                pde->proc_fops->release(file_inode(file), file);
                spin_lock(&pde->pde_unload_lock);
 -              list_del_init(&pdeo->lh);
 +              /* After ->release. */
 +              list_del(&pdeo->lh);
                if (pdeo->c)
                        complete(pdeo->c);
                kfree(pdeo);
@@@ -178,8 -167,6 +178,8 @@@ void proc_entry_rundown(struct proc_dir
        if (atomic_add_return(BIAS, &de->in_use) != BIAS)
                wait_for_completion(&c);
  
 +      /* ->pde_openers list can't grow from now on. */
 +
        spin_lock(&de->pde_unload_lock);
        while (!list_empty(&de->pde_openers)) {
                struct pde_opener *pdeo;
@@@ -325,17 -312,16 +325,17 @@@ static int proc_reg_open(struct inode *
        struct pde_opener *pdeo;
  
        /*
 -       * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
 -       * sequence. ->release won't be called because ->proc_fops will be
 -       * cleared. Depending on complexity of ->release, consequences vary.
 +       * Ensure that
 +       * 1) PDE's ->release hook will be called no matter what
 +       *    either normally by close()/->release, or forcefully by
 +       *    rmmod/remove_proc_entry.
 +       *
 +       * 2) rmmod isn't blocked by opening file in /proc and sitting on
 +       *    the descriptor (including "rmmod foo </proc/foo" scenario).
         *
 -       * We can't wait for mercy when close will be done for real, it's
 -       * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
 -       * by hand in remove_proc_entry(). For this, save opener's credentials
 -       * for later.
 +       * Save every "struct file" with custom ->release hook.
         */
 -      pdeo = kzalloc(sizeof(struct pde_opener), GFP_KERNEL);
 +      pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
        if (!pdeo)
                return -ENOMEM;
  
        if (rv == 0 && release) {
                /* To know what to release. */
                pdeo->file = file;
 -              /* Strictly for "too late" ->release in proc_reg_release(). */
 +              pdeo->closing = false;
 +              pdeo->c = NULL;
                spin_lock(&pde->pde_unload_lock);
                list_add(&pdeo->lh, &pde->pde_openers);
                spin_unlock(&pde->pde_unload_lock);
@@@ -425,7 -410,6 +425,6 @@@ static const char *proc_get_link(struc
  }
  
  const struct inode_operations proc_link_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = proc_get_link,
  };
  
diff --combined fs/ubifs/file.c
index aa0625f4f6427677f38f8c5d6e177f58572ad05d,a7eae181e1bdd892aa13939c1ccb2db4ec16726b..b0d783774c963c97f32df7649feeb8fb7e4e3e4f
@@@ -78,13 -78,6 +78,13 @@@ static int read_block(struct inode *ino
                goto dump;
  
        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
 +
 +      if (ubifs_crypt_is_encrypted(inode)) {
 +              err = ubifs_decrypt(inode, dn, &dlen, block);
 +              if (err)
 +                      goto dump;
 +      }
 +
        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
@@@ -657,13 -650,6 +657,13 @@@ static int populate_page(struct ubifs_i
  
                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;
 +
 +                      if (ubifs_crypt_is_encrypted(inode)) {
 +                              err = ubifs_decrypt(inode, dn, &dlen, page_block);
 +                              if (err)
 +                                      goto out_err;
 +                      }
 +
                        err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
@@@ -1608,15 -1594,6 +1608,15 @@@ static const struct vm_operations_struc
  static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
  {
        int err;
 +      struct inode *inode = file->f_mapping->host;
 +
 +      if (ubifs_crypt_is_encrypted(inode)) {
 +              err = fscrypt_get_encryption_info(inode);
 +              if (err)
 +                      return -EACCES;
 +              if (!fscrypt_has_encryption_key(inode))
 +                      return -ENOKEY;
 +      }
  
        err = generic_file_mmap(file, vma);
        if (err)
        return 0;
  }
  
 +static int ubifs_file_open(struct inode *inode, struct file *filp)
 +{
 +      int ret;
 +      struct dentry *dir;
 +      struct ubifs_info *c = inode->i_sb->s_fs_info;
 +
 +      if (ubifs_crypt_is_encrypted(inode)) {
 +              ret = fscrypt_get_encryption_info(inode);
 +              if (ret)
 +                      return -EACCES;
 +              if (!fscrypt_has_encryption_key(inode))
 +                      return -ENOKEY;
 +      }
 +
 +      dir = dget_parent(file_dentry(filp));
 +      if (ubifs_crypt_is_encrypted(d_inode(dir)) &&
 +                      !fscrypt_has_permitted_context(d_inode(dir), inode)) {
 +              ubifs_err(c, "Inconsistent encryption contexts: %lu/%lu",
 +                        (unsigned long) d_inode(dir)->i_ino,
 +                        (unsigned long) inode->i_ino);
 +              dput(dir);
 +              ubifs_ro_mode(c, -EPERM);
 +              return -EPERM;
 +      }
 +      dput(dir);
 +
 +      return 0;
 +}
 +
 +static const char *ubifs_get_link(struct dentry *dentry,
 +                                          struct inode *inode,
 +                                          struct delayed_call *done)
 +{
 +      int err;
 +      struct fscrypt_symlink_data *sd;
 +      struct ubifs_inode *ui = ubifs_inode(inode);
 +      struct fscrypt_str cstr;
 +      struct fscrypt_str pstr;
 +
 +      if (!ubifs_crypt_is_encrypted(inode))
 +              return ui->data;
 +
 +      if (!dentry)
 +              return ERR_PTR(-ECHILD);
 +
 +      err = fscrypt_get_encryption_info(inode);
 +      if (err)
 +              return ERR_PTR(err);
 +
 +      sd = (struct fscrypt_symlink_data *)ui->data;
 +      cstr.name = sd->encrypted_path;
 +      cstr.len = le16_to_cpu(sd->len);
 +
 +      if (cstr.len == 0)
 +              return ERR_PTR(-ENOENT);
 +
 +      if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > ui->data_len)
 +              return ERR_PTR(-EIO);
 +
 +      err = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
 +      if (err)
 +              return ERR_PTR(err);
 +
 +      err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
 +      if (err) {
 +              fscrypt_fname_free_buffer(&pstr);
 +              return ERR_PTR(err);
 +      }
 +
 +      pstr.name[pstr.len] = '\0';
 +
 +      // XXX this probably won't happen anymore...
 +      if (pstr.name[0] == '\0') {
 +              fscrypt_fname_free_buffer(&pstr);
 +              return ERR_PTR(-ENOENT);
 +      }
 +
 +      set_delayed_call(done, kfree_link, pstr.name);
 +      return pstr.name;
 +}
 +
 +
  const struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
@@@ -1733,8 -1628,7 +1733,7 @@@ const struct inode_operations ubifs_fil
  };
  
  const struct inode_operations ubifs_symlink_inode_operations = {
-       .readlink    = generic_readlink,
 -      .get_link    = simple_get_link,
 +      .get_link    = ubifs_get_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
        .listxattr   = ubifs_listxattr,
@@@ -1752,7 -1646,6 +1751,7 @@@ const struct file_operations ubifs_file
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
 +      .open           = ubifs_file_open,
  #ifdef CONFIG_COMPAT
        .compat_ioctl   = ubifs_compat_ioctl,
  #endif
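
The ubifs_get_link() addition above hands its decoded target back to the VFS through the delayed_call mechanism rather than leaking or double-freeing the buffer. Below is a minimal sketch of that pattern, not ubifs's actual code: the example_* names and the "decoded/target" string are hypothetical, while set_delayed_call() and kfree_link() are the same helpers used in the hunk above.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical ->get_link(): return an allocated buffer and let the VFS
 * free it via the delayed_call once the symlink body has been consumed. */
static const char *example_get_link(struct dentry *dentry,
				    struct inode *inode,
				    struct delayed_call *done)
{
	char *target;

	if (!dentry)
		return ERR_PTR(-ECHILD);	/* no RCU-walk support here */

	target = kstrdup("decoded/target", GFP_KERNEL);
	if (!target)
		return ERR_PTR(-ENOMEM);

	/* kfree_link() is kfree(); the VFS calls it after readlink/follow */
	set_delayed_call(done, kfree_link, target);
	return target;
}
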
diff --combined fs/xfs/xfs_ioctl.c
index fc563b82aea65a666aecd67f349046c06d68147b,9b12f7c993e9a7fe6df932dfb287d250c1ff6d3e..c67cfb451fd3a74dad2d365e372b4e5fa296eecd
@@@ -287,7 -287,7 +287,7 @@@ xfs_readlink_by_handle
                return PTR_ERR(dentry);
  
        /* Restrict this handle operation to symlinks only. */
-       if (!d_inode(dentry)->i_op->readlink) {
+       if (!d_is_symlink(dentry)) {
                error = -EINVAL;
                goto out_dput;
        }
                goto out_dput;
        }
  
-       error = d_inode(dentry)->i_op->readlink(dentry, hreq->ohandle, olen);
+       error = vfs_readlink(dentry, hreq->ohandle, olen);
  
   out_dput:
        dput(dentry);
@@@ -639,7 -639,7 +639,7 @@@ xfs_ioc_space
                return error;
  
        xfs_ilock(ip, iolock);
 -      error = xfs_break_layouts(inode, &iolock, false);
 +      error = xfs_break_layouts(inode, &iolock);
        if (error)
                goto out_unlock;
  
@@@ -910,14 -910,16 +910,14 @@@ xfs_ioc_fsgetxattr
        if (attr) {
                if (ip->i_afp) {
                        if (ip->i_afp->if_flags & XFS_IFEXTENTS)
 -                              fa.fsx_nextents = ip->i_afp->if_bytes /
 -                                                      sizeof(xfs_bmbt_rec_t);
 +                              fa.fsx_nextents = xfs_iext_count(ip->i_afp);
                        else
                                fa.fsx_nextents = ip->i_d.di_anextents;
                } else
                        fa.fsx_nextents = 0;
        } else {
                if (ip->i_df.if_flags & XFS_IFEXTENTS)
 -                      fa.fsx_nextents = ip->i_df.if_bytes /
 -                                              sizeof(xfs_bmbt_rec_t);
 +                      fa.fsx_nextents = xfs_iext_count(&ip->i_df);
                else
                        fa.fsx_nextents = ip->i_d.di_nextents;
        }
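
The xfs_readlink_by_handle() hunk above is the caller-side half of the series: test d_is_symlink() and call vfs_readlink() instead of dereferencing i_op->readlink directly. A minimal sketch of that calling convention follows; example_readlink_by_dentry() and its arguments are hypothetical, while d_is_symlink() and vfs_readlink() are the real helpers introduced or reused by this series.

#include <linux/dcache.h>
#include <linux/fs.h>

/* Hypothetical caller: restrict the operation to symlinks, then let
 * vfs_readlink() dispatch to ->readlink or the ->get_link-based default. */
static int example_readlink_by_dentry(struct dentry *dentry,
				      char __user *ubuf, int bufsiz)
{
	if (!d_is_symlink(dentry))
		return -EINVAL;

	return vfs_readlink(dentry, ubuf, bufsiz);
}
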
diff --combined fs/xfs/xfs_iops.c
index b930be0b1596592de8fc71aecbb25c033c513f17,de79f29c7e5be4da318dc08a562eeaedce9c3490..308bebb6dfd266f85ae225ef0c235128bb7b36ba
@@@ -983,13 -983,15 +983,13 @@@ xfs_vn_setattr
                struct xfs_inode        *ip = XFS_I(d_inode(dentry));
                uint                    iolock = XFS_IOLOCK_EXCL;
  
 -              xfs_ilock(ip, iolock);
 -              error = xfs_break_layouts(d_inode(dentry), &iolock, true);
 -              if (!error) {
 -                      xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 -                      iolock |= XFS_MMAPLOCK_EXCL;
 +              error = xfs_break_layouts(d_inode(dentry), &iolock);
 +              if (error)
 +                      return error;
  
 -                      error = xfs_vn_setattr_size(dentry, iattr);
 -              }
 -              xfs_iunlock(ip, iolock);
 +              xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 +              error = xfs_vn_setattr_size(dentry, iattr);
 +              xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        } else {
                error = xfs_vn_setattr_nonsize(dentry, iattr);
        }
@@@ -1120,7 -1122,6 +1120,6 @@@ static const struct inode_operations xf
  };
  
  static const struct inode_operations xfs_symlink_inode_operations = {
-       .readlink               = generic_readlink,
        .get_link               = xfs_vn_get_link,
        .getattr                = xfs_vn_getattr,
        .setattr                = xfs_vn_setattr,
  };
  
  static const struct inode_operations xfs_inline_symlink_inode_operations = {
-       .readlink               = generic_readlink,
        .get_link               = xfs_vn_get_link_inline,
        .getattr                = xfs_vn_getattr,
        .setattr                = xfs_vn_setattr,
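
With readlink(2) now falling back to the generic path, the ".readlink = generic_readlink" lines removed above are pure boilerplate; a symlink inode_operations table only needs ->get_link. A sketch under that assumption, with hypothetical names (example_get_link as sketched after the ubifs hunks):

/* Hypothetical symlink ops table: no .readlink initializer required,
 * readlink(2) is handled by the VFS default built on ->get_link. */
static const struct inode_operations example_symlink_iops = {
	.get_link	= example_get_link,
};
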
diff --combined include/linux/fs.h
index 358789650607b398e3e03078c171dc84a4dfd460,e343d784651a8ab5e817d3a22e3c8dced8b7f189..e6e4146bf9ae5ee9def7da466f369127895713df
@@@ -28,6 -28,7 +28,6 @@@
  #include <linux/uidgid.h>
  #include <linux/lockdep.h>
  #include <linux/percpu-rwsem.h>
 -#include <linux/blk_types.h>
  #include <linux/workqueue.h>
  #include <linux/percpu-rwsem.h>
  #include <linux/delayed_call.h>
@@@ -37,7 -38,6 +37,7 @@@
  
  struct backing_dev_info;
  struct bdi_writeback;
 +struct bio;
  struct export_operations;
  struct hd_geometry;
  struct iovec;
@@@ -151,6 -151,58 +151,6 @@@ typedef int (dio_iodone_t)(struct kioc
   */
  #define CHECK_IOVEC_ONLY -1
  
 -/*
 - * The below are the various read and write flags that we support. Some of
 - * them include behavioral modifiers that send information down to the
 - * block layer and IO scheduler. They should be used along with a req_op.
 - * Terminology:
 - *
 - *    The block layer uses device plugging to defer IO a little bit, in
 - *    the hope that we will see more IO very shortly. This increases
 - *    coalescing of adjacent IO and thus reduces the number of IOs we
 - *    have to send to the device. It also allows for better queuing,
 - *    if the IO isn't mergeable. If the caller is going to be waiting
 - *    for the IO, then he must ensure that the device is unplugged so
 - *    that the IO is dispatched to the driver.
 - *
 - *    All IO is handled async in Linux. This is fine for background
 - *    writes, but for reads or writes that someone waits for completion
 - *    on, we want to notify the block layer and IO scheduler so that they
 - *    know about it. That allows them to make better scheduling
 - *    decisions. So when the below references 'sync' and 'async', it
 - *    is referencing this priority hint.
 - *
 - * With that in mind, the available types are:
 - *
 - * READ                       A normal read operation. Device will be plugged.
 - * READ_SYNC          A synchronous read. Device is not plugged, caller can
 - *                    immediately wait on this read without caring about
 - *                    unplugging.
 - * WRITE              A normal async write. Device will be plugged.
 - * WRITE_SYNC         Synchronous write. Identical to WRITE, but passes down
 - *                    the hint that someone will be waiting on this IO
 - *                    shortly. The write equivalent of READ_SYNC.
 - * WRITE_ODIRECT      Special case write for O_DIRECT only.
 - * WRITE_FLUSH                Like WRITE_SYNC but with preceding cache flush.
 - * WRITE_FUA          Like WRITE_SYNC but data is guaranteed to be on
 - *                    non-volatile media on completion.
 - * WRITE_FLUSH_FUA    Combination of WRITE_FLUSH and FUA. The IO is preceded
 - *                    by a cache flush and data is guaranteed to be on
 - *                    non-volatile media on completion.
 - *
 - */
 -#define RW_MASK                       REQ_OP_WRITE
 -
 -#define READ                  REQ_OP_READ
 -#define WRITE                 REQ_OP_WRITE
 -
 -#define READ_SYNC             REQ_SYNC
 -#define WRITE_SYNC            (REQ_SYNC | REQ_NOIDLE)
 -#define WRITE_ODIRECT         REQ_SYNC
 -#define WRITE_FLUSH           (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
 -#define WRITE_FUA             (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 -#define WRITE_FLUSH_FUA               (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
 -
  /*
   * Attribute flags.  These should be or-ed together to figure out what
   * has been changed!
@@@ -543,6 -595,7 +543,7 @@@ is_uncached_acl(struct posix_acl *acl
  #define IOP_LOOKUP    0x0002
  #define IOP_NOFOLLOW  0x0004
  #define IOP_XATTR     0x0008
+ #define IOP_DEFAULT_READLINK  0x0010
  
  /*
   * Keep mostly read-only and often accessed (especially for
@@@ -1726,30 -1779,11 +1727,30 @@@ extern ssize_t vfs_writev(struct file *
                unsigned long, loff_t *, int);
  extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
                                   loff_t, size_t, unsigned int);
 +extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 +                                    struct inode *inode_out, loff_t pos_out,
 +                                    u64 *len, bool is_dedupe);
  extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                struct file *file_out, loff_t pos_out, u64 len);
 +extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 +                                       struct inode *dest, loff_t destoff,
 +                                       loff_t len, bool *is_same);
  extern int vfs_dedupe_file_range(struct file *file,
                                 struct file_dedupe_range *same);
  
 +static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
 +                                    struct file *file_out, loff_t pos_out,
 +                                    u64 len)
 +{
 +      int ret;
 +
 +      sb_start_write(file_inode(file_out)->i_sb);
 +      ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
 +      sb_end_write(file_inode(file_out)->i_sb);
 +
 +      return ret;
 +}
 +
  struct super_operations {
        struct inode *(*alloc_inode)(struct super_block *sb);
        void (*destroy_inode)(struct inode *);
@@@ -2090,11 -2124,11 +2091,11 @@@ extern int may_umount_tree(struct vfsmo
  extern int may_umount(struct vfsmount *);
  extern long do_mount(const char *, const char __user *,
                     const char *, unsigned long, void *);
 -extern struct vfsmount *collect_mounts(struct path *);
 +extern struct vfsmount *collect_mounts(const struct path *);
  extern void drop_collected_mounts(struct vfsmount *);
  extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
                          struct vfsmount *);
 -extern int vfs_statfs(struct path *, struct kstatfs *);
 +extern int vfs_statfs(const struct path *, struct kstatfs *);
  extern int user_statfs(const char __user *, struct kstatfs *);
  extern int fd_statfs(int, struct kstatfs *);
  extern int vfs_ustat(dev_t, struct kstatfs *);
@@@ -2466,6 -2500,19 +2467,6 @@@ extern void make_bad_inode(struct inod
  extern bool is_bad_inode(struct inode *);
  
  #ifdef CONFIG_BLOCK
 -static inline bool op_is_write(unsigned int op)
 -{
 -      return op == REQ_OP_READ ? false : true;
 -}
 -
 -/*
 - * return data direction, READ or WRITE
 - */
 -static inline int bio_data_dir(struct bio *bio)
 -{
 -      return op_is_write(bio_op(bio)) ? WRITE : READ;
 -}
 -
  extern void check_disk_size_change(struct gendisk *disk,
                                   struct block_device *bdev);
  extern int revalidate_disk(struct gendisk *);
@@@ -2663,7 -2710,7 +2664,7 @@@ extern struct file * open_exec(const ch
   
  /* fs/dcache.c -- generic fs support functions */
  extern bool is_subdir(struct dentry *, struct dentry *);
 -extern bool path_is_under(struct path *, struct path *);
 +extern bool path_is_under(const struct path *, const struct path *);
  
  extern char *file_path(struct file *, char *, int);
  
@@@ -2736,6 -2783,7 +2737,6 @@@ static inline void remove_inode_hash(st
  extern void inode_sb_list_add(struct inode *inode);
  
  #ifdef CONFIG_BLOCK
 -extern blk_qc_t submit_bio(struct bio *);
  extern int bdev_read_only(struct block_device *);
  #endif
  extern int set_blocksize(struct block_device *, int);
@@@ -2867,7 -2915,6 +2868,6 @@@ extern int __page_symlink(struct inode 
  extern int page_symlink(struct inode *inode, const char *symname, int len);
  extern const struct inode_operations page_symlink_inode_operations;
  extern void kfree_link(void *);
- extern int generic_readlink(struct dentry *, char __user *, int);
  extern void generic_fillattr(struct inode *, struct kstat *);
  int vfs_getattr_nosec(struct path *path, struct kstat *stat);
  extern int vfs_getattr(struct path *, struct kstat *);
@@@ -2888,6 -2935,7 +2888,7 @@@ extern int vfs_lstat(const char __user 
  extern int vfs_fstat(unsigned int, struct kstat *);
  extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
  extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
+ extern int vfs_readlink(struct dentry *, char __user *, int);
  
  extern int __generic_block_fiemap(struct inode *inode,
                                  struct fiemap_extent_info *fieinfo,
diff --combined mm/shmem.c
index 54287d443806243fba2efa67e6b5ec71f3a4f56d,aa47e6baecde992df81f29dbd4b86d5834cd11a0..b1b20dc63265029e0f5e77221d8a2d851032992f
@@@ -300,19 -300,18 +300,19 @@@ void shmem_uncharge(struct inode *inode
  static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
  {
 +      struct radix_tree_node *node;
        void **pslot;
        void *item;
  
        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
 -      pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
 -      if (!pslot)
 +      item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
 +      if (!item)
                return -ENOENT;
 -      item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
        if (item != expected)
                return -ENOENT;
 -      radix_tree_replace_slot(pslot, replacement);
 +      __radix_tree_replace(&mapping->page_tree, node, pslot,
 +                           replacement, NULL, NULL);
        return 0;
  }
  
@@@ -371,7 -370,6 +371,7 @@@ static bool shmem_confirm_swap(struct a
  
  int shmem_huge __read_mostly;
  
 +#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
  static int shmem_parse_huge(const char *str)
  {
        if (!strcmp(str, "never"))
@@@ -409,7 -407,6 +409,7 @@@ static const char *shmem_format_huge(in
                return "bad_val";
        }
  }
 +#endif
  
  static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
@@@ -661,8 -658,8 +661,8 @@@ unsigned long shmem_partial_swap_usage(
                        swapped++;
  
                if (need_resched()) {
 +                      slot = radix_tree_iter_resume(slot, &iter);
                        cond_resched_rcu();
 -                      slot = radix_tree_iter_next(&iter);
                }
        }
  
@@@ -1049,30 -1046,6 +1049,30 @@@ static void shmem_evict_inode(struct in
        clear_inode(inode);
  }
  
 +static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
 +{
 +      struct radix_tree_iter iter;
 +      void **slot;
 +      unsigned long found = -1;
 +      unsigned int checked = 0;
 +
 +      rcu_read_lock();
 +      radix_tree_for_each_slot(slot, root, &iter, 0) {
 +              if (*slot == item) {
 +                      found = iter.index;
 +                      break;
 +              }
 +              checked++;
 +              if ((checked % 4096) != 0)
 +                      continue;
 +              slot = radix_tree_iter_resume(slot, &iter);
 +              cond_resched_rcu();
 +      }
 +
 +      rcu_read_unlock();
 +      return found;
 +}
 +
  /*
   * If swap found in inode, free it and move page from swapcache to filecache.
   */
@@@ -1086,7 -1059,7 +1086,7 @@@ static int shmem_unuse_inode(struct shm
        int error = 0;
  
        radswap = swp_to_radix_entry(swap);
 -      index = radix_tree_locate_item(&mapping->page_tree, radswap);
 +      index = find_swap_entry(&mapping->page_tree, radswap);
        if (index == -1)
                return -EAGAIN; /* tell shmem_unuse we found nothing */
  
@@@ -1566,7 -1539,7 +1566,7 @@@ static int shmem_getpage_gfp(struct ino
        struct mm_struct *fault_mm, int *fault_type)
  {
        struct address_space *mapping = inode->i_mapping;
 -      struct shmem_inode_info *info;
 +      struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct mm_struct *charge_mm;
        struct mem_cgroup *memcg;
@@@ -1616,6 -1589,7 +1616,6 @@@ repeat
         * Fast cache lookup did not find it:
         * bring it back from swap or allocate.
         */
 -      info = SHMEM_I(inode);
        sbinfo = SHMEM_SB(inode->i_sb);
        charge_mm = fault_mm ? : current->mm;
  
@@@ -1863,6 -1837,7 +1863,6 @@@ unlock
                put_page(page);
        }
        if (error == -ENOSPC && !once++) {
 -              info = SHMEM_I(inode);
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock_irq(&info->lock);
        return error;
  }
  
 +/*
 + * This is like autoremove_wake_function, but it removes the wait queue
 + * entry unconditionally - even if something else had already woken the
 + * target.
 + */
 +static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
 +{
 +      int ret = default_wake_function(wait, mode, sync, key);
 +      list_del_init(&wait->task_list);
 +      return ret;
 +}
 +
  static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
        struct inode *inode = file_inode(vma->vm_file);
                    vmf->pgoff >= shmem_falloc->start &&
                    vmf->pgoff < shmem_falloc->next) {
                        wait_queue_head_t *shmem_falloc_waitq;
 -                      DEFINE_WAIT(shmem_fault_wait);
 +                      DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
  
                        ret = VM_FAULT_NOPAGE;
                        if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
@@@ -2471,8 -2434,8 +2471,8 @@@ static void shmem_tag_pins(struct addre
                }
  
                if (need_resched()) {
 +                      slot = radix_tree_iter_resume(slot, &iter);
                        cond_resched_rcu();
 -                      slot = radix_tree_iter_next(&iter);
                }
        }
        rcu_read_unlock();
@@@ -2541,8 -2504,8 +2541,8 @@@ static int shmem_wait_for_pins(struct a
                        spin_unlock_irq(&mapping->tree_lock);
  continue_resched:
                        if (need_resched()) {
 +                              slot = radix_tree_iter_resume(slot, &iter);
                                cond_resched_rcu();
 -                              slot = radix_tree_iter_next(&iter);
                        }
                }
                rcu_read_unlock();
@@@ -2702,7 -2665,6 +2702,7 @@@ static long shmem_fallocate(struct fil
                spin_lock(&inode->i_lock);
                inode->i_private = NULL;
                wake_up_all(&shmem_falloc_waitq);
 +              WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list));
                spin_unlock(&inode->i_lock);
                error = 0;
                goto out;
@@@ -3212,7 -3174,6 +3212,6 @@@ static ssize_t shmem_listxattr(struct d
  #endif /* CONFIG_TMPFS_XATTR */
  
  static const struct inode_operations shmem_short_symlink_operations = {
-       .readlink       = generic_readlink,
        .get_link       = simple_get_link,
  #ifdef CONFIG_TMPFS_XATTR
        .listxattr      = shmem_listxattr,
  };
  
  static const struct inode_operations shmem_symlink_inode_operations = {
-       .readlink       = generic_readlink,
        .get_link       = shmem_get_link,
  #ifdef CONFIG_TMPFS_XATTR
        .listxattr      = shmem_listxattr,
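
The shmem hunks above switch from radix_tree_iter_next() to radix_tree_iter_resume() before dropping out of the RCU read section. A minimal sketch of that iteration pattern, assuming a hypothetical walker (example_count_slots and root are made up; the iterator helpers are the ones used in the hunks above):

#include <linux/radix-tree.h>
#include <linux/sched.h>

/* Hypothetical walk: count populated slots while rescheduling politely.
 * radix_tree_iter_resume() lets the walk continue at the right index after
 * cond_resched_rcu() has dropped and re-taken rcu_read_lock(). */
static unsigned long example_count_slots(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long count = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		count++;
		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	return count;
}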