Git Repo - linux.git/commitdiff
Merge tag 'for-5.13-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
author: Linus Torvalds <[email protected]>
Mon, 17 May 2021 16:55:10 +0000 (09:55 -0700)
committer: Linus Torvalds <[email protected]>
Mon, 17 May 2021 16:55:10 +0000 (09:55 -0700)
Pull btrfs fixes from David Sterba:
 "A few more fixes:

   - fix fiemap to print extents that could get misreported due to
     internal extent splitting and logical merging for fiemap output

   - fix RCU stalls during delayed iputs

   - fix removed dentries still existing after log is synced"

* tag 'for-5.13-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix removed dentries still existing after log is synced
  btrfs: return whole extents in fiemap
  btrfs: avoid RCU stalls while running delayed iputs
  btrfs: return 0 for dev_extent_hole_check_zoned hole_start in case of error

1  2 
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c

diff --combined fs/btrfs/extent_io.c
index 074a78a202b8363b5aee1a74d33cf200b0b164e4,360d997c722632fe73b5b4aab55698ecab9bebe5..83b9c64ba76e63a3e2a9f41ae2cda5bec7c3fd4f
@@@ -3421,12 -3421,15 +3421,12 @@@ int btrfs_do_readpage(struct page *page
        }
  
        if (page->index == last_byte >> PAGE_SHIFT) {
 -              char *userpage;
                size_t zero_offset = offset_in_page(last_byte);
  
                if (zero_offset) {
                        iosize = PAGE_SIZE - zero_offset;
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + zero_offset, 0, iosize);
 +                      memzero_page(page, zero_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
                }
        }
        begin_page_read(fs_info, page);
                u64 disk_bytenr;
  
                if (cur >= last_byte) {
 -                      char *userpage;
                        struct extent_state *cached = NULL;
  
                        iosize = PAGE_SIZE - pg_offset;
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + pg_offset, 0, iosize);
 +                      memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur,
  
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
 -                      char *userpage;
                        struct extent_state *cached = NULL;
  
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + pg_offset, 0, iosize);
 +                      memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
  
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
@@@ -3836,7 -3845,12 +3836,7 @@@ static int __extent_writepage(struct pa
        }
  
        if (page->index == end_index) {
 -              char *userpage;
 -
 -              userpage = kmap_atomic(page);
 -              memset(userpage + pg_offset, 0,
 -                     PAGE_SIZE - pg_offset);
 -              kunmap_atomic(userpage);
 +              memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
                flush_dcache_page(page);
        }
  
@@@ -5196,7 -5210,7 +5196,7 @@@ int extent_fiemap(struct btrfs_inode *i
                  u64 start, u64 len)
  {
        int ret = 0;
-       u64 off = start;
+       u64 off;
        u64 max = start + len;
        u32 flags = 0;
        u32 found_type;
                goto out_free_ulist;
        }
  
+       /*
+        * We can't initialize that to 'start' as this could miss extents due
+        * to extent item merging
+        */
+       off = 0;
        start = round_down(start, btrfs_inode_sectorsize(inode));
        len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
  
diff --combined fs/btrfs/inode.c
index eb6fddf40841c7c43adf67f92e51067c3abba365,095e452f59f0f57d47e0f513641550a00fa155cc..c6164ae16e2a4fc794681ce8c0058282a7596bb9
@@@ -646,12 -646,17 +646,12 @@@ again
                if (!ret) {
                        unsigned long offset = offset_in_page(total_compressed);
                        struct page *page = pages[nr_pages - 1];
 -                      char *kaddr;
  
                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
 -                      if (offset) {
 -                              kaddr = kmap_atomic(page);
 -                              memset(kaddr + offset, 0,
 -                                     PAGE_SIZE - offset);
 -                              kunmap_atomic(kaddr);
 -                      }
 +                      if (offset)
 +                              memzero_page(page, offset, PAGE_SIZE - offset);
                        will_compress = 1;
                }
        }
@@@ -3241,6 -3246,7 +3241,7 @@@ void btrfs_run_delayed_iputs(struct btr
                inode = list_first_entry(&fs_info->delayed_iputs,
                                struct btrfs_inode, delayed_iput);
                run_delayed_iput_locked(fs_info, inode);
+               cond_resched_lock(&fs_info->delayed_iput_lock);
        }
        spin_unlock(&fs_info->delayed_iput_lock);
  }
@@@ -4828,6 -4834,7 +4829,6 @@@ int btrfs_truncate_block(struct btrfs_i
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
 -      char *kaddr;
        bool only_release_metadata = false;
        u32 blocksize = fs_info->sectorsize;
        pgoff_t index = from >> PAGE_SHIFT;
@@@ -4919,13 -4926,15 +4920,13 @@@ again
        if (offset != blocksize) {
                if (!len)
                        len = blocksize - offset;
 -              kaddr = kmap(page);
                if (front)
 -                      memset(kaddr + (block_start - page_offset(page)),
 -                              0, offset);
 +                      memzero_page(page, (block_start - page_offset(page)),
 +                                   offset);
                else
 -                      memset(kaddr + (block_start - page_offset(page)) +  offset,
 -                              0, len);
 +                      memzero_page(page, (block_start - page_offset(page)) + offset,
 +                                   len);
                flush_dcache_page(page);
 -              kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
@@@ -6824,9 -6833,11 +6825,9 @@@ static noinline int uncompress_inline(s
         * cover that region here.
         */
  
 -      if (max_size + pg_offset < PAGE_SIZE) {
 -              char *map = kmap(page);
 -              memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
 -              kunmap(page);
 -      }
 +      if (max_size + pg_offset < PAGE_SIZE)
 +              memzero_page(page,  pg_offset + max_size,
 +                           PAGE_SIZE - max_size - pg_offset);
        kfree(tmp);
        return ret;
  }
@@@ -8496,6 -8507,7 +8497,6 @@@ vm_fault_t btrfs_page_mkwrite(struct vm
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
 -      char *kaddr;
        unsigned long zero_start;
        loff_t size;
        vm_fault_t ret;
@@@ -8609,8 -8621,10 +8610,8 @@@ again
                zero_start = PAGE_SIZE;
  
        if (zero_start != PAGE_SIZE) {
 -              kaddr = kmap(page);
 -              memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
 +              memzero_page(page, zero_start, PAGE_SIZE - zero_start);
                flush_dcache_page(page);
 -              kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
@@@ -10607,8 -10621,6 +10608,8 @@@ static const struct inode_operations bt
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
        .tmpfile        = btrfs_tmpfile,
 +      .fileattr_get   = btrfs_fileattr_get,
 +      .fileattr_set   = btrfs_fileattr_set,
  };
  
  static const struct file_operations btrfs_dir_file_operations = {
@@@ -10662,8 -10674,6 +10663,8 @@@ static const struct inode_operations bt
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
 +      .fileattr_get   = btrfs_fileattr_get,
 +      .fileattr_set   = btrfs_fileattr_set,
  };
  static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
diff --combined fs/btrfs/tree-log.c
index 95a600034d61283752969105450e57e954f2ac99,fd6b1f13112ed1ba876659f9fb0f3b89cd05ad55..14ec61048483918901fb051d12dd395258fbc149
@@@ -4138,8 -4138,7 +4138,8 @@@ static noinline int copy_items(struct b
        return ret;
  }
  
 -static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
 +static int extent_cmp(void *priv, const struct list_head *a,
 +                    const struct list_head *b)
  {
        struct extent_map *em1, *em2;
  
@@@ -6463,6 -6462,24 +6463,24 @@@ void btrfs_log_new_name(struct btrfs_tr
            (!old_dir || old_dir->logged_trans < trans->transid))
                return;
  
+       /*
+        * If we are doing a rename (old_dir is not NULL) from a directory that
+        * was previously logged, make sure the next log attempt on the directory
+        * is not skipped and logs the inode again. This is because the log may
+        * not currently be authoritative for a range including the old
+        * BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to make
+        * sure after a log replay we do not end up with both the new and old
+        * dentries around (in case the inode is a directory we would have a
+        * directory with two hard links and 2 inode references for different
+        * parents). The next log attempt of old_dir will happen at
+        * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
+        * below, because we have previously set inode->last_unlink_trans to the
+        * current transaction ID, either here or at btrfs_record_unlink_dir() in
+        * case inode is a directory.
+        */
+       if (old_dir)
+               old_dir->logged_trans = 0;
        btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
        ctx.logging_new_name = true;
        /*
diff --combined fs/btrfs/volumes.c
index 9a1ead0c4a3113b24b0a7a658cf20cd84778e7af,bc53939fef48f4762a0246ea1536c3dd0b969fb9..47d27059d06411a78d8cc1d2fff1675ee7656a0d
@@@ -1224,8 -1224,7 +1224,8 @@@ static int open_fs_devices(struct btrfs
        return 0;
  }
  
 -static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
 +static int devid_cmp(void *priv, const struct list_head *a,
 +                   const struct list_head *b)
  {
        struct btrfs_device *dev1, *dev2;
  
@@@ -1459,7 -1458,7 +1459,7 @@@ static bool dev_extent_hole_check_zoned
                /* Given hole range was invalid (outside of device) */
                if (ret == -ERANGE) {
                        *hole_start += *hole_size;
-                       *hole_size = false;
+                       *hole_size = 0;
                        return true;
                }
  
This page took 0.123104 seconds and 4 git commands to generate.