Merge tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
author Linus Torvalds <[email protected]>
Thu, 18 Nov 2021 20:41:14 +0000 (12:41 -0800)
committer Linus Torvalds <[email protected]>
Thu, 18 Nov 2021 20:41:14 +0000 (12:41 -0800)
Pull btrfs fixes from David Sterba:
 "Several xes and one old ioctl deprecation. Namely there's fix for
  crashes/warnings with lzo compression that was suspected to be caused
  by first pull merge resolution, but it was a different bug.

  Summary:

   - regression fix for a crash in lzo due to missing boundary checks of
     the page array

   - fix crashes on ARM64 due to missing barriers when synchronizing
     status bits between work queues (the barrier pairing is sketched
     below)

   - silence lockdep when reading chunk tree during mount

   - fix false positive warning in integrity checker on devices with
     disabled write caching

   - fix signedness of bitfields in scrub

   - start deprecation of balance v1 ioctl"
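
The ARM64 fix above concerns one worker publishing a done-style status bit that another worker then reads; without barriers, the reader can observe the bit before the writer's earlier stores become visible. A minimal sketch of the barrier pairing involved, with illustrative names (struct item, DONE_BIT, producer/consumer) rather than the actual fs/btrfs/async-thread.c identifiers:

#include <linux/atomic.h>
#include <linux/bitops.h>

struct item {
	unsigned long flags;
	int payload;
};

#define DONE_BIT 0

static void producer(struct item *it)
{
	it->payload = 42;	/* normal stores... */
	/* ...must be visible before the flag: order stores vs. set_bit() */
	smp_mb__before_atomic();
	set_bit(DONE_BIT, &it->flags);
}

static int consumer(struct item *it)
{
	if (!test_bit(DONE_BIT, &it->flags))
		return -EAGAIN;
	/* order the flag load before the payload loads; pairs with the
	 * smp_mb__before_atomic() in producer() */
	smp_rmb();
	return it->payload;
}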

* tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: deprecate BTRFS_IOC_BALANCE ioctl
  btrfs: make 1-bit bit-fields of scrub_page unsigned int
  btrfs: check-integrity: fix a warning on write caching disabled disk
  btrfs: silence lockdep when reading chunk tree during mount
  btrfs: fix memory ordering between normal and ordered work functions
  btrfs: fix a out-of-bound access in copy_compressed_data_to_page()

fs/btrfs/disk-io.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/volumes.c

diff --combined fs/btrfs/disk-io.c
index 59c3be8c1f4c667b718ba1f81ce0cd684cc65106,847aabb306760df8cfe6bae8cc4d4c080819ba35..514ead6e93b6f2fc5dd687e365cedd1de4f8997f
@@@ -3748,7 -3748,7 +3748,7 @@@ struct btrfs_super_block *btrfs_read_de
        else if (ret)
                return ERR_PTR(ret);
  
 -      if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
 +      if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
                return ERR_PTR(-EINVAL);
  
        page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS);
@@@ -3978,11 -3978,23 +3978,23 @@@ static void btrfs_end_empty_barrier(str
   */
  static void write_dev_flush(struct btrfs_device *device)
  {
-       struct request_queue *q = bdev_get_queue(device->bdev);
        struct bio *bio = device->flush_bio;
  
+ #ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       /*
+        * When a disk has write caching disabled, we skip submission of a bio
+        * with flush and sync requests before writing the superblock, since
+        * it's not needed. However when the integrity checker is enabled, this
+        * results in reports that there are metadata blocks referred by a
+        * superblock that were not properly flushed. So don't skip the bio
+        * submission only when the integrity checker is enabled for the sake
+        * of simplicity, since this is a debug tool and not meant for use in
+        * non-debug builds.
+        */
+       struct request_queue *q = bdev_get_queue(device->bdev);
        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return;
+ #endif
  
        bio_reset(bio);
        bio->bi_end_io = btrfs_end_empty_barrier;
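
Several hunks in this merge convert i_size_read(bdev->bd_inode) (or direct bd_inode->i_size reads) to bdev_nr_bytes(). Assuming the 5.16-era definition of that helper, it is a trivial wrapper over the same inode size read, so these conversions are API cleanups rather than behavior changes:

/* Hedged sketch of the helper the merge converts to; in 5.16 it simply
 * hides the bd_inode access behind a block-layer API. */
static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode);
}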
diff --combined fs/btrfs/ioctl.c
index fb8cc9642ac40ad2fda24feb2cb6c787e88378b4,012fbfdfbebf29ff91e3dee2d9863e1e7e1d31e8..92138ac2a4e2aceee153fbfc101ffd5acff42aa1
@@@ -1691,7 -1691,7 +1691,7 @@@ static noinline int btrfs_ioctl_resize(
        }
  
        if (!strcmp(sizestr, "max"))
 -              new_size = device->bdev->bd_inode->i_size;
 +              new_size = bdev_nr_bytes(device->bdev);
        else {
                if (sizestr[0] == '-') {
                        mod = -1;
                ret = -EINVAL;
                goto out_finish;
        }
 -      if (new_size > device->bdev->bd_inode->i_size) {
 +      if (new_size > bdev_nr_bytes(device->bdev)) {
                ret = -EFBIG;
                goto out_finish;
        }
@@@ -2222,8 -2222,9 +2222,8 @@@ static noinline int search_ioctl(struc
        key.offset = sk->min_offset;
  
        while (1) {
 -              ret = fault_in_pages_writeable(ubuf + sk_offset,
 -                                             *buf_size - sk_offset);
 -              if (ret)
 +              ret = -EFAULT;
 +              if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
                        break;
  
                ret = btrfs_search_forward(root, &key, path, sk->min_transid);
@@@ -3985,6 -3986,10 +3985,10 @@@ static long btrfs_ioctl_balance(struct 
        bool need_unlock; /* for mut. excl. ops lock */
        int ret;
  
+       if (!arg)
+               btrfs_warn(fs_info,
+       "IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
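
The search_ioctl() hunk above replaces fault_in_pages_writeable() with fault_in_writeable(). The new helper returns the number of bytes it could not fault in (zero means the whole range is writable), which is why the caller now sets -EFAULT up front and only tests for a non-zero result. A minimal usage sketch, with a hypothetical helper name:

#include <linux/pagemap.h>	/* fault_in_writeable() */

/* prefault_user_buffer() is illustrative, not from the patch */
static int prefault_user_buffer(char __user *ubuf, size_t len)
{
	/* a non-zero return means bytes were NOT faulted in */
	if (fault_in_writeable(ubuf, len))
		return -EFAULT;
	return 0;
}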
  
diff --combined fs/btrfs/lzo.c
index 65cb0766e62d648dbaf5d487cada8e1b6495692f,f410ceabcdbd8d1b207482620a07cce67e074a52..9febb80258252a41cf54b64598844706f8b87871
@@@ -125,14 -125,17 +125,18 @@@ static inline size_t read_compress_leng
  static int copy_compressed_data_to_page(char *compressed_data,
                                        size_t compressed_size,
                                        struct page **out_pages,
+                                       unsigned long max_nr_page,
                                        u32 *cur_out,
                                        const u32 sectorsize)
  {
        u32 sector_bytes_left;
        u32 orig_out;
        struct page *cur_page;
 +      char *kaddr;
  
+       if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+               return -E2BIG;
        /*
         * We never allow a segment header crossing sector boundary, previous
         * run should ensure we have enough space left inside the sector.
                out_pages[*cur_out / PAGE_SIZE] = cur_page;
        }
  
 -      write_compress_length(page_address(cur_page) + offset_in_page(*cur_out),
 +      kaddr = kmap(cur_page);
 +      write_compress_length(kaddr + offset_in_page(*cur_out),
                              compressed_size);
        *cur_out += LZO_LEN;
  
                u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
                                     orig_out + compressed_size - *cur_out);
  
 +              kunmap(cur_page);
++
+               if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+                       return -E2BIG;
                cur_page = out_pages[*cur_out / PAGE_SIZE];
                /* Allocate a new page */
                if (!cur_page) {
                                return -ENOMEM;
                        out_pages[*cur_out / PAGE_SIZE] = cur_page;
                }
 +              kaddr = kmap(cur_page);
  
 -              memcpy(page_address(cur_page) + offset_in_page(*cur_out),
 +              memcpy(kaddr + offset_in_page(*cur_out),
                       compressed_data + *cur_out - orig_out, copy_len);
  
                *cur_out += copy_len;
         */
        sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
        if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
 -              return 0;
 +              goto out;
  
        /* The remaining size is not enough, pad it with zeros */
 -      memset(page_address(cur_page) + offset_in_page(*cur_out), 0,
 +      memset(kaddr + offset_in_page(*cur_out), 0,
               sector_bytes_left);
        *cur_out += sector_bytes_left;
 +
 +out:
 +      kunmap(cur_page);
        return 0;
  }
  
@@@ -202,7 -202,7 +210,8 @@@ int lzo_compress_pages(struct list_hea
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
        struct page *page_in = NULL;
 +      char *sizes_ptr;
+       const unsigned long max_nr_page = *out_pages;
        int ret = 0;
        /* Points to the file offset of input data */
        u64 cur_in = start;
        u32 cur_out = 0;
        u32 len = *total_out;
  
+       ASSERT(max_nr_page > 0);
        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;
         */
        cur_out += LZO_LEN;
        while (cur_in < start + len) {
 +              char *data_in;
                const u32 sectorsize_mask = sectorsize - 1;
                u32 sector_off = (cur_in - start) & sectorsize_mask;
                u32 in_len;
                /* Compress at most one sector of data each time */
                in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
                ASSERT(in_len);
 -              ret = lzo1x_1_compress(page_address(page_in) +
 +              data_in = kmap(page_in);
 +              ret = lzo1x_1_compress(data_in +
                                       offset_in_page(cur_in), in_len,
                                       workspace->cbuf, &out_len,
                                       workspace->mem);
 +              kunmap(page_in);
                if (ret < 0) {
                        pr_debug("BTRFS: lzo in loop returned %d\n", ret);
                        ret = -EIO;
                }
  
                ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
-                                                  pages, &cur_out, sectorsize);
+                                                  pages, max_nr_page,
+                                                  &cur_out, sectorsize);
                if (ret < 0)
                        goto out;
  
        }
  
        /* Store the size of all chunks of compressed data */
 -      write_compress_length(page_address(pages[0]), cur_out);
 +      sizes_ptr = kmap_local_page(pages[0]);
 +      write_compress_length(sizes_ptr, cur_out);
 +      kunmap_local(sizes_ptr);
  
        ret = 0;
        *total_out = cur_out;
@@@ -294,7 -291,6 +305,7 @@@ static void copy_compressed_segment(str
        u32 orig_in = *cur_in;
  
        while (*cur_in < orig_in + len) {
 +              char *kaddr;
                struct page *cur_page;
                u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
                                          orig_in + len - *cur_in);
                ASSERT(copy_len);
                cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
  
 +              kaddr = kmap(cur_page);
                memcpy(dest + *cur_in - orig_in,
 -                      page_address(cur_page) + offset_in_page(*cur_in),
 +                      kaddr + offset_in_page(*cur_in),
                        copy_len);
 +              kunmap(cur_page);
  
                *cur_in += copy_len;
        }
@@@ -317,7 -311,6 +328,7 @@@ int lzo_decompress_bio(struct list_hea
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
        const u32 sectorsize = fs_info->sectorsize;
 +      char *kaddr;
        int ret;
        /* Compressed data length, can be unaligned */
        u32 len_in;
        /* Bytes decompressed so far */
        u32 cur_out = 0;
  
 -      len_in = read_compress_length(page_address(cb->compressed_pages[0]));
 +      kaddr = kmap(cb->compressed_pages[0]);
 +      len_in = read_compress_length(kaddr);
 +      kunmap(cb->compressed_pages[0]);
        cur_in += LZO_LEN;
  
        /*
                       (cur_in + LZO_LEN - 1) / sectorsize);
                cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
                ASSERT(cur_page);
 -              seg_len = read_compress_length(page_address(cur_page) +
 -                                             offset_in_page(cur_in));
 +              kaddr = kmap(cur_page);
 +              seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
 +              kunmap(cur_page);
                cur_in += LZO_LEN;
  
                /* Copy the compressed segment payload into workspace */
@@@ -449,7 -439,7 +460,7 @@@ int lzo_decompress(struct list_head *ws
        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
  
 -      kaddr = page_address(dest_page);
 +      kaddr = kmap_local_page(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
  
        /*
         */
        if (bytes < destlen)
                memset(kaddr+bytes, 0, destlen-bytes);
 +      kunmap_local(kaddr);
  out:
        return ret;
  }
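
The lzo.c changes above consistently replace page_address() with kmap()/kunmap() (or kmap_local_page()/kunmap_local() for short-lived mappings), since on 32-bit HIGHMEM configurations the compressed pages may have no permanent kernel mapping. The basic pattern, as an illustrative helper that is not itself part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'len' bytes out of a page that may live in highmem.
 * kmap_local_page() mappings are cheap and CPU-local, and must be
 * unmapped in reverse order of mapping. */
static void copy_from_page(void *dst, struct page *page,
			   size_t offset, size_t len)
{
	char *kaddr = kmap_local_page(page);

	memcpy(dst, kaddr + offset, len);
	kunmap_local(kaddr);
}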
diff --combined fs/btrfs/volumes.c
index 61ac57bcbf1ae4116a58863f920479572528828c,cc80f2a97a0ba9fc369a84871d6f6e25c873e577..0997e3cd74e915c3f056eeff302a7cc1de7dba96
@@@ -509,7 -509,7 +509,7 @@@ btrfs_get_bdev_and_sb(const char *devic
        }
  
        if (flush)
 -              filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
 +              sync_blockdev(*bdev);
        ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
        if (ret) {
                blkdev_put(*bdev, flags);
@@@ -1293,7 -1293,7 +1293,7 @@@ static struct btrfs_super_block *btrfs_
        pgoff_t index;
  
        /* make sure our super fits in the device */
 -      if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
 +      if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
                return ERR_PTR(-EINVAL);
  
        /* make sure our super fits in the page */
@@@ -2657,8 -2657,8 +2657,8 @@@ int btrfs_init_new_device(struct btrfs_
        device->io_width = fs_info->sectorsize;
        device->io_align = fs_info->sectorsize;
        device->sector_size = fs_info->sectorsize;
 -      device->total_bytes = round_down(i_size_read(bdev->bd_inode),
 -                                       fs_info->sectorsize);
 +      device->total_bytes =
 +              round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
        device->disk_total_bytes = device->total_bytes;
        device->commit_total_bytes = device->total_bytes;
        set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
@@@ -7313,7 -7313,7 +7313,7 @@@ static int read_one_dev(struct extent_b
  
        fill_device_from_item(leaf, dev_item, device);
        if (device->bdev) {
 -              u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
 +              u64 max_total_bytes = bdev_nr_bytes(device->bdev);
  
                if (device->total_bytes > max_total_bytes) {
                        btrfs_err(fs_info,
@@@ -7558,6 -7558,19 +7558,19 @@@ int btrfs_read_chunk_tree(struct btrfs_
         */
        fs_info->fs_devices->total_rw_bytes = 0;
  
+       /*
+        * Lockdep complains about possible circular locking dependency between
+        * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
+        * used for freeze protection of a fs (struct super_block.s_writers),
+        * which we take when starting a transaction, and extent buffers of the
+        * chunk tree if we call read_one_dev() while holding a lock on an
+        * extent buffer of the chunk tree. Since we are mounting the filesystem
+        * and at this point there can't be any concurrent task modifying the
+        * chunk tree, to keep it simple, just skip locking on the chunk tree.
+        */
+       ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+       path->skip_locking = 1;
        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
                                goto error;
                        break;
                }
-               /*
-                * The nodes on level 1 are not locked but we don't need to do
-                * that during mount time as nothing else can access the tree
-                */
                node = path->nodes[1];
                if (node) {
                        if (last_ra_node != node->start) {
                         * requirement for chunk allocation, see the comment on
                         * top of btrfs_chunk_alloc() for details.
                         */
-                       ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(&found_key, leaf, chunk);
                        if (ret)
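
The chunk tree hunk above replaces the per-node comment and the late ASSERT with an up-front ASSERT plus path->skip_locking, so no extent buffer locks are taken during the mount-time search and lockdep has nothing to report. A hedged sketch of the idiom (btrfs-internal API, simplified; the function name and the single-shot search stand in for the real iteration with btrfs_next_leaf() and friends):

/* Valid only while nothing else can modify the tree, which is what the
 * ASSERT documents: the filesystem is not yet open. */
static int search_chunk_tree_unlocked(struct btrfs_fs_info *fs_info,
				      struct btrfs_root *root,
				      struct btrfs_key *key)
{
	struct btrfs_path *path = btrfs_alloc_path();
	int ret;

	if (!path)
		return -ENOMEM;
	ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
	path->skip_locking = 1;	/* no extent-buffer locks for lockdep to see */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}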