Git Repo - linux.git/commitdiff
Merge tag 'for-5.13-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
authorLinus Torvalds <[email protected]>
Fri, 21 May 2021 23:24:12 +0000 (13:24 -1000)
committerLinus Torvalds <[email protected]>
Fri, 21 May 2021 23:24:12 +0000 (13:24 -1000)
Pull btrfs fixes from David Sterba:
 "A few more fixes:

   - fix unaligned compressed writes in zoned mode

   - fix false positive lockdep warning when cloning inline extent

   - remove wrong BUG_ON in tree-log error handling"

* tag 'for-5.13-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: zoned: fix parallel compressed writes
  btrfs: zoned: pass start block to btrfs_use_zone_append
  btrfs: do not BUG_ON in link_to_fixup_dir
  btrfs: release path before starting transaction when cloning inline extent

1  2 
fs/btrfs/compression.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/reflink.c
fs/btrfs/tree-log.c

diff --combined fs/btrfs/compression.c
index 2bea01d23a5b57352cb659bbd5a20aa06017fc89,91743a0b34c5162a314d613336d02070f310471d..d17ac301032e42d643663ceecae7e98a350f76a6
@@@ -28,6 -28,7 +28,7 @@@
  #include "compression.h"
  #include "extent_io.h"
  #include "extent_map.h"
+ #include "zoned.h"
  
  static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  
@@@ -349,6 -350,7 +350,7 @@@ static void end_compressed_bio_write(st
         */
        inode = cb->inode;
        cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
+       btrfs_record_physical_zoned(inode, cb->start, bio);
        btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
                        cb->start, cb->start + cb->len - 1,
                        bio->bi_status == BLK_STS_OK);
@@@ -401,6 -403,8 +403,8 @@@ blk_status_t btrfs_submit_compressed_wr
        u64 first_byte = disk_start;
        blk_status_t ret;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
+       const bool use_append = btrfs_use_zone_append(inode, disk_start);
+       const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
  
        WARN_ON(!PAGE_ALIGNED(start));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        cb->nr_pages = nr_pages;
  
        bio = btrfs_bio_alloc(first_byte);
-       bio->bi_opf = REQ_OP_WRITE | write_flags;
+       bio->bi_opf = bio_op | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
  
+       if (use_append) {
+               struct extent_map *em;
+               struct map_lookup *map;
+               struct block_device *bdev;
+               em = btrfs_get_chunk_map(fs_info, disk_start, PAGE_SIZE);
+               if (IS_ERR(em)) {
+                       kfree(cb);
+                       bio_put(bio);
+                       return BLK_STS_NOTSUPP;
+               }
+               map = em->map_lookup;
+               /* We only support single profile for now */
+               ASSERT(map->num_stripes == 1);
+               bdev = map->stripes[0].dev->bdev;
+               bio_set_dev(bio, bdev);
+               free_extent_map(em);
+       }
        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
                kthread_associate_blkcg(blkcg_css);
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
+               int len;
  
                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);
  
+               if (pg_index == 0 && use_append)
+                       len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
+               else
+                       len = bio_add_page(bio, page, PAGE_SIZE, 0);
                page->mapping = NULL;
-               if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
-                   PAGE_SIZE) {
+               if (submit || len < PAGE_SIZE) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                        }
  
                        bio = btrfs_bio_alloc(first_byte);
-                       bio->bi_opf = REQ_OP_WRITE | write_flags;
+                       bio->bi_opf = bio_op | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        if (blkcg_css)
                                bio->bi_opf |= REQ_CGROUP_PUNT;
+                       /*
+                        * Use bio_add_page() to ensure the bio has at least one
+                        * page.
+                        */
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
@@@ -591,13 -625,16 +625,13 @@@ static noinline int add_ra_bio_pages(st
                free_extent_map(em);
  
                if (page->index == end_index) {
 -                      char *userpage;
                        size_t zero_offset = offset_in_page(isize);
  
                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
 -                              userpage = kmap_atomic(page);
 -                              memset(userpage + zero_offset, 0, zeros);
 +                              memzero_page(page, zero_offset, zeros);
                                flush_dcache_page(page);
 -                              kunmap_atomic(userpage);
                        }
                }
  
diff --combined fs/btrfs/extent_io.c
index 83b9c64ba76e63a3e2a9f41ae2cda5bec7c3fd4f,d9f20ca3ac7dbe3a3a47c22e38256a69faeebb93..dee2dafbc872f379aa2fc861a822104aea7768f0
@@@ -3421,12 -3421,15 +3421,12 @@@ int btrfs_do_readpage(struct page *page
        }
  
        if (page->index == last_byte >> PAGE_SHIFT) {
 -              char *userpage;
                size_t zero_offset = offset_in_page(last_byte);
  
                if (zero_offset) {
                        iosize = PAGE_SIZE - zero_offset;
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + zero_offset, 0, iosize);
 +                      memzero_page(page, zero_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
                }
        }
        begin_page_read(fs_info, page);
                u64 disk_bytenr;
  
                if (cur >= last_byte) {
 -                      char *userpage;
                        struct extent_state *cached = NULL;
  
                        iosize = PAGE_SIZE - pg_offset;
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + pg_offset, 0, iosize);
 +                      memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur,
  
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
 -                      char *userpage;
                        struct extent_state *cached = NULL;
  
 -                      userpage = kmap_atomic(page);
 -                      memset(userpage + pg_offset, 0, iosize);
 +                      memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
 -                      kunmap_atomic(userpage);
  
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
@@@ -3753,7 -3762,7 +3753,7 @@@ static noinline_for_stack int __extent_
                /* Note that em_end from extent_map_end() is exclusive */
                iosize = min(em_end, end + 1) - cur;
  
-               if (btrfs_use_zone_append(inode, em))
+               if (btrfs_use_zone_append(inode, em->block_start))
                        opf = REQ_OP_ZONE_APPEND;
  
                free_extent_map(em);
@@@ -3836,7 -3845,12 +3836,7 @@@ static int __extent_writepage(struct pa
        }
  
        if (page->index == end_index) {
 -              char *userpage;
 -
 -              userpage = kmap_atomic(page);
 -              memset(userpage + pg_offset, 0,
 -                     PAGE_SIZE - pg_offset);
 -              kunmap_atomic(userpage);
 +              memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
                flush_dcache_page(page);
        }
  
diff --combined fs/btrfs/inode.c
index c6164ae16e2a4fc794681ce8c0058282a7596bb9,bb4ab408d67017fbbbf2743a867f1ab2390e3311..33f14573f2ecd8fbf1e3d15619786842786abfce
@@@ -646,12 -646,17 +646,12 @@@ again
                if (!ret) {
                        unsigned long offset = offset_in_page(total_compressed);
                        struct page *page = pages[nr_pages - 1];
 -                      char *kaddr;
  
                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
 -                      if (offset) {
 -                              kaddr = kmap_atomic(page);
 -                              memset(kaddr + offset, 0,
 -                                     PAGE_SIZE - offset);
 -                              kunmap_atomic(kaddr);
 -                      }
 +                      if (offset)
 +                              memzero_page(page, offset, PAGE_SIZE - offset);
                        will_compress = 1;
                }
        }
@@@ -4829,6 -4834,7 +4829,6 @@@ int btrfs_truncate_block(struct btrfs_i
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
 -      char *kaddr;
        bool only_release_metadata = false;
        u32 blocksize = fs_info->sectorsize;
        pgoff_t index = from >> PAGE_SHIFT;
@@@ -4920,13 -4926,15 +4920,13 @@@ again
        if (offset != blocksize) {
                if (!len)
                        len = blocksize - offset;
 -              kaddr = kmap(page);
                if (front)
 -                      memset(kaddr + (block_start - page_offset(page)),
 -                              0, offset);
 +                      memzero_page(page, (block_start - page_offset(page)),
 +                                   offset);
                else
 -                      memset(kaddr + (block_start - page_offset(page)) +  offset,
 -                              0, len);
 +                      memzero_page(page, (block_start - page_offset(page)) + offset,
 +                                   len);
                flush_dcache_page(page);
 -              kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
@@@ -6825,9 -6833,11 +6825,9 @@@ static noinline int uncompress_inline(s
         * cover that region here.
         */
  
 -      if (max_size + pg_offset < PAGE_SIZE) {
 -              char *map = kmap(page);
 -              memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
 -              kunmap(page);
 -      }
 +      if (max_size + pg_offset < PAGE_SIZE)
 +              memzero_page(page,  pg_offset + max_size,
 +                           PAGE_SIZE - max_size - pg_offset);
        kfree(tmp);
        return ret;
  }
@@@ -7786,7 -7796,7 +7786,7 @@@ static int btrfs_dio_iomap_begin(struc
        iomap->bdev = fs_info->fs_devices->latest_bdev;
        iomap->length = len;
  
-       if (write && btrfs_use_zone_append(BTRFS_I(inode), em))
+       if (write && btrfs_use_zone_append(BTRFS_I(inode), em->block_start))
                iomap->flags |= IOMAP_F_ZONE_APPEND;
  
        free_extent_map(em);
@@@ -8497,6 -8507,7 +8497,6 @@@ vm_fault_t btrfs_page_mkwrite(struct vm
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
 -      char *kaddr;
        unsigned long zero_start;
        loff_t size;
        vm_fault_t ret;
@@@ -8610,8 -8621,10 +8610,8 @@@ again
                zero_start = PAGE_SIZE;
  
        if (zero_start != PAGE_SIZE) {
 -              kaddr = kmap(page);
 -              memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
 +              memzero_page(page, zero_start, PAGE_SIZE - zero_start);
                flush_dcache_page(page);
 -              kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
@@@ -10608,8 -10621,6 +10608,8 @@@ static const struct inode_operations bt
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
        .tmpfile        = btrfs_tmpfile,
 +      .fileattr_get   = btrfs_fileattr_get,
 +      .fileattr_set   = btrfs_fileattr_set,
  };
  
  static const struct file_operations btrfs_dir_file_operations = {
@@@ -10663,8 -10674,6 +10663,8 @@@ static const struct inode_operations bt
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
 +      .fileattr_get   = btrfs_fileattr_get,
 +      .fileattr_set   = btrfs_fileattr_set,
  };
  static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
diff --combined fs/btrfs/reflink.c
index 3928ecc40d7b07d831e342ce61f51165c20d2f1d,06682128d8fae61d978bf3e35b7f7e5356a4ecdf..d434dc78dadf5ccf37d32cf0be97d9b17d19b8c5
@@@ -129,8 -129,12 +129,8 @@@ static int copy_inline_to_page(struct b
         * So what's in the range [500, 4095] corresponds to zeroes.
         */
        if (datal < block_size) {
 -              char *map;
 -
 -              map = kmap(page);
 -              memset(map + datal, 0, block_size - datal);
 +              memzero_page(page, datal, block_size - datal);
                flush_dcache_page(page);
 -              kunmap(page);
        }
  
        SetPageUptodate(page);
@@@ -281,6 -285,11 +281,11 @@@ copy_inline_extent
        ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
  out:
        if (!ret && !trans) {
+               /*
+                * Release path before starting a new transaction so we don't
+                * hold locks that would confuse lockdep.
+                */
+               btrfs_release_path(path);
                /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
diff --combined fs/btrfs/tree-log.c
index 14ec61048483918901fb051d12dd395258fbc149,c17d6b827b42e5024a9313c88278614aa3b0b6ab..326be57f282816dabc8766af26d0722940531aef
@@@ -1858,8 -1858,6 +1858,6 @@@ static noinline int link_to_fixup_dir(s
                ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
        } else if (ret == -EEXIST) {
                ret = 0;
-       } else {
-               BUG(); /* Logic Error */
        }
        iput(inode);
  
@@@ -4138,8 -4136,7 +4136,8 @@@ static noinline int copy_items(struct b
        return ret;
  }
  
 -static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
 +static int extent_cmp(void *priv, const struct list_head *a,
 +                    const struct list_head *b)
  {
        struct extent_map *em1, *em2;
  
This page took 0.102389 seconds and 4 git commands to generate.