Git Repo - J-linux.git/commitdiff
Merge tag 'for-5.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
authorLinus Torvalds <[email protected]>
Fri, 24 Jul 2020 21:11:43 +0000 (14:11 -0700)
committerLinus Torvalds <[email protected]>
Fri, 24 Jul 2020 21:11:43 +0000 (14:11 -0700)
Pull btrfs fixes from David Sterba:
 "A few resource leak fixes from recent patches, all are stable material.

  The problems have been observed during testing or have a reproducer"

* tag 'for-5.8-rc6-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix mount failure caused by race with umount
  btrfs: fix page leaks after failure to lock page for delalloc
  btrfs: qgroup: fix data leak caused by race between writeback and truncate
  btrfs: fix double free on ulist after backref resolution failure

1  2 
fs/btrfs/extent_io.c
fs/btrfs/inode.c

diff --combined fs/btrfs/extent_io.c
index 608f93438b294e465d71a2c4e919f446bce4c714,72c836b54efc0d687b64fd8e6165a331002d6756..60278e52c37abb8c35a1ae42098f43e622f70930
@@@ -1999,7 -1999,8 +1999,8 @@@ static int __process_pages_contig(struc
                                if (!PageDirty(pages[i]) ||
                                    pages[i]->mapping != mapping) {
                                        unlock_page(pages[i]);
-                                       put_page(pages[i]);
+                                       for (; i < ret; i++)
+                                               put_page(pages[i]);
                                        err = -EAGAIN;
                                        goto out;
                                }
@@@ -3099,16 -3100,22 +3100,16 @@@ static int submit_extent_page(unsigned 
  static void attach_extent_buffer_page(struct extent_buffer *eb,
                                      struct page *page)
  {
 -      if (!PagePrivate(page)) {
 -              SetPagePrivate(page);
 -              get_page(page);
 -              set_page_private(page, (unsigned long)eb);
 -      } else {
 +      if (!PagePrivate(page))
 +              attach_page_private(page, eb);
 +      else
                WARN_ON(page->private != (unsigned long)eb);
 -      }
  }
  
  void set_page_extent_mapped(struct page *page)
  {
 -      if (!PagePrivate(page)) {
 -              SetPagePrivate(page);
 -              get_page(page);
 -              set_page_private(page, EXTENT_PAGE_PRIVATE);
 -      }
 +      if (!PagePrivate(page))
 +              attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
  }
  
  static struct extent_map *
@@@ -4384,32 -4391,51 +4385,32 @@@ int extent_writepages(struct address_sp
        return ret;
  }
  
 -int extent_readpages(struct address_space *mapping, struct list_head *pages,
 -                   unsigned nr_pages)
 +void extent_readahead(struct readahead_control *rac)
  {
        struct bio *bio = NULL;
        unsigned long bio_flags = 0;
        struct page *pagepool[16];
        struct extent_map *em_cached = NULL;
 -      int nr = 0;
        u64 prev_em_start = (u64)-1;
 +      int nr;
  
 -      while (!list_empty(pages)) {
 -              u64 contig_end = 0;
 -
 -              for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
 -                      struct page *page = lru_to_page(pages);
 -
 -                      prefetchw(&page->flags);
 -                      list_del(&page->lru);
 -                      if (add_to_page_cache_lru(page, mapping, page->index,
 -                                              readahead_gfp_mask(mapping))) {
 -                              put_page(page);
 -                              break;
 -                      }
 -
 -                      pagepool[nr++] = page;
 -                      contig_end = page_offset(page) + PAGE_SIZE - 1;
 -              }
 -
 -              if (nr) {
 -                      u64 contig_start = page_offset(pagepool[0]);
 +      while ((nr = readahead_page_batch(rac, pagepool))) {
 +              u64 contig_start = page_offset(pagepool[0]);
 +              u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
  
 -                      ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
 +              ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
  
 -                      contiguous_readpages(pagepool, nr, contig_start,
 -                                   contig_end, &em_cached, &bio, &bio_flags,
 -                                   &prev_em_start);
 -              }
 +              contiguous_readpages(pagepool, nr, contig_start, contig_end,
 +                              &em_cached, &bio, &bio_flags, &prev_em_start);
        }
  
        if (em_cached)
                free_extent_map(em_cached);
  
 -      if (bio)
 -              return submit_one_bio(bio, 0, bio_flags);
 -      return 0;
 +      if (bio) {
 +              if (submit_one_bio(bio, 0, bio_flags))
 +                      return;
 +      }
  }
  
  /*
@@@ -4927,7 -4953,10 +4928,7 @@@ static void btrfs_release_extent_buffer
                         * We need to make sure we haven't be attached
                         * to a new eb.
                         */
 -                      ClearPagePrivate(page);
 -                      set_page_private(page, 0);
 -                      /* One for the page private */
 -                      put_page(page);
 +                      detach_page_private(page);
                }
  
                if (mapped)
diff --combined fs/btrfs/inode.c
index 43c803c16b4824d695f84c8fca0b1835ecc82511,b7dd5124941e0a6473b2c957c2b38b43a9abbd52..6862cd7e21a996badf559082318a7163f6be912e
@@@ -4925,8 -4925,8 +4925,8 @@@ static void evict_inode_truncate_pages(
  
        /*
         * Keep looping until we have no more ranges in the io tree.
 -       * We can have ongoing bios started by readpages (called from readahead)
 -       * that have their endio callback (extent_io.c:end_bio_extent_readpage)
 +       * We can have ongoing bios started by readahead that have
 +       * their endio callback (extent_io.c:end_bio_extent_readpage)
         * still in progress (unlocked the pages in the bio but did not yet
         * unlocked the ranges in the io tree). Therefore this means some
         * ranges can still be locked and eviction started because before
@@@ -7121,11 -7121,11 +7121,11 @@@ static int lock_extent_direct(struct in
                         * for it to complete) and then invalidate the pages for
                         * this range (through invalidate_inode_pages2_range()),
                         * but that can lead us to a deadlock with a concurrent
 -                       * call to readpages() (a buffered read or a defrag call
 +                       * call to readahead (a buffered read or a defrag call
                         * triggered a readahead) on a page lock due to an
                         * ordered dio extent we created before but did not have
                         * yet a corresponding bio submitted (whence it can not
 -                       * complete), which makes readpages() wait for that
 +                       * complete), which makes readahead wait for that
                         * ordered extent to complete while holding a lock on
                         * that page.
                         */
        return ret;
  }
  
 -#define BTRFS_FIEMAP_FLAGS    (FIEMAP_FLAG_SYNC)
 -
  static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
  {
        int     ret;
  
 -      ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
 +      ret = fiemap_prep(inode, fieinfo, start, &len, 0);
        if (ret)
                return ret;
  
@@@ -7993,16 -7995,21 +7993,16 @@@ static int btrfs_writepages(struct addr
        return extent_writepages(mapping, wbc);
  }
  
 -static int
 -btrfs_readpages(struct file *file, struct address_space *mapping,
 -              struct list_head *pages, unsigned nr_pages)
 +static void btrfs_readahead(struct readahead_control *rac)
  {
 -      return extent_readpages(mapping, pages, nr_pages);
 +      extent_readahead(rac);
  }
  
  static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
  {
        int ret = try_release_extent_mapping(page, gfp_flags);
 -      if (ret == 1) {
 -              ClearPagePrivate(page);
 -              set_page_private(page, 0);
 -              put_page(page);
 -      }
 +      if (ret == 1)
 +              detach_page_private(page);
        return ret;
  }
  
@@@ -8024,8 -8031,14 +8024,8 @@@ static int btrfs_migratepage(struct add
        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;
  
 -      if (page_has_private(page)) {
 -              ClearPagePrivate(page);
 -              get_page(newpage);
 -              set_page_private(newpage, page_private(page));
 -              set_page_private(page, 0);
 -              put_page(page);
 -              SetPagePrivate(newpage);
 -      }
 +      if (page_has_private(page))
 +              attach_page_private(newpage, detach_page_private(page));
  
        if (PagePrivate2(page)) {
                ClearPagePrivate2(page);
@@@ -8123,20 -8136,17 +8123,17 @@@ again
        /*
         * Qgroup reserved space handler
         * Page here will be either
-        * 1) Already written to disk
-        *    In this case, its reserved space is released from data rsv map
-        *    and will be freed by delayed_ref handler finally.
-        *    So even we call qgroup_free_data(), it won't decrease reserved
-        *    space.
-        * 2) Not written to disk
-        *    This means the reserved space should be freed here. However,
-        *    if a truncate invalidates the page (by clearing PageDirty)
-        *    and the page is accounted for while allocating extent
-        *    in btrfs_check_data_free_space() we let delayed_ref to
-        *    free the entire extent.
+        * 1) Already written to disk or ordered extent already submitted
+        *    Then its QGROUP_RESERVED bit in io_tree is already cleaned.
+        *    Qgroup will be handled by its qgroup_record then.
+        *    btrfs_qgroup_free_data() call will do nothing here.
+        *
+        * 2) Not written to disk yet
+        *    Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
+        *    bit of its io_tree, and free the qgroup reserved data space.
+        *    Since the IO will never happen for this page.
         */
-       if (PageDirty(page))
-               btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+       btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
        if (!inode_evicting) {
                clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED |
                                 EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
        }
  
        ClearPageChecked(page);
 -      if (PagePrivate(page)) {
 -              ClearPagePrivate(page);
 -              set_page_private(page, 0);
 -              put_page(page);
 -      }
 +      detach_page_private(page);
  }
  
  /*
@@@ -10238,7 -10252,7 +10235,7 @@@ static const struct address_space_opera
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .writepages     = btrfs_writepages,
 -      .readpages      = btrfs_readpages,
 +      .readahead      = btrfs_readahead,
        .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
This page took 0.130783 seconds and 4 git commands to generate.