Git Repo - linux.git/commitdiff
btrfs: merge btrfs_folio_unlock_writer() into btrfs_folio_end_writer_lock()
authorQu Wenruo <[email protected]>
Mon, 2 Sep 2024 04:27:08 +0000 (13:57 +0930)
committerDavid Sterba <[email protected]>
Tue, 10 Sep 2024 14:51:22 +0000 (16:51 +0200)
The function btrfs_folio_unlock_writer() is already calling
btrfs_folio_end_writer_lock() to do the heavy lifting work; the only
missing piece is the 0 writer check.

Thus there is no need to keep two different functions, move the 0 writer
check into btrfs_folio_end_writer_lock(), and remove
btrfs_folio_unlock_writer().

Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
fs/btrfs/extent_io.c
fs/btrfs/subpage.c
fs/btrfs/subpage.h

index 485d88f9947bd920b7521b20ed2fae802414fefd..70be1150c34e318b8acf899ffc1766de537eb244 100644 (file)
@@ -2220,7 +2220,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
                                                       cur, cur_len, !ret);
                        mapping_set_error(mapping, ret);
                }
-               btrfs_folio_unlock_writer(fs_info, folio, cur, cur_len);
+               btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
                if (ret < 0)
                        found_error = true;
 next_page:
index 7fe58c4d992324baadbe786df3cd653ac1ba334c..83660fa82c3236deb21a26b1614ccfb696f7ffe0 100644 (file)
@@ -378,13 +378,47 @@ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+/*
+ * Handle different locked folios:
+ *
+ * - Non-subpage folio
+ *   Just unlock it.
+ *
+ * - folio locked but without any subpage locked
+ *   This happens either before writepage_delalloc() or the delalloc range is
+ *   already handled by previous folio.
+ *   We can simple unlock it.
+ *
+ * - folio locked with subpage range locked.
+ *   We go through the locked sectors inside the range and clear their locked
+ *   bitmap, reduce the writer lock number, and unlock the page if that's
+ *   the last locked range.
+ */
 void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
 {
+       struct btrfs_subpage *subpage = folio_get_private(folio);
+
+       ASSERT(folio_test_locked(folio));
+
        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) {
                folio_unlock(folio);
                return;
        }
+
+       /*
+        * For subpage case, there are two types of locked page.  With or
+        * without writers number.
+        *
+        * Since we own the page lock, no one else could touch subpage::writers
+        * and we are safe to do several atomic operations without spinlock.
+        */
+       if (atomic_read(&subpage->writers) == 0) {
+               /* No writers, locked by plain lock_page(). */
+               folio_unlock(folio);
+               return;
+       }
+
        btrfs_subpage_clamp_range(folio, &start, &len);
        if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
                folio_unlock(folio);
@@ -702,53 +736,6 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
-/*
- * Handle different locked pages with different page sizes:
- *
- * - Page locked by plain lock_page()
- *   It should not have any subpage::writers count.
- *   Can be unlocked by unlock_page().
- *   This is the most common locked page for extent_writepage() called
- *   inside extent_write_cache_pages().
- *   Rarer cases include the @locked_page from extent_write_locked_range().
- *
- * - Page locked by lock_delalloc_pages()
- *   There is only one caller, all pages except @locked_page for
- *   extent_write_locked_range().
- *   In this case, we have to call subpage helper to handle the case.
- */
-void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
-                              struct folio *folio, u64 start, u32 len)
-{
-       struct btrfs_subpage *subpage;
-
-       ASSERT(folio_test_locked(folio));
-       /* For non-subpage case, we just unlock the page */
-       if (!btrfs_is_subpage(fs_info, folio->mapping)) {
-               folio_unlock(folio);
-               return;
-       }
-
-       ASSERT(folio_test_private(folio) && folio_get_private(folio));
-       subpage = folio_get_private(folio);
-
-       /*
-        * For subpage case, there are two types of locked page.  With or
-        * without writers number.
-        *
-        * Since we own the page lock, no one else could touch subpage::writers
-        * and we are safe to do several atomic operations without spinlock.
-        */
-       if (atomic_read(&subpage->writers) == 0) {
-               /* No writers, locked by plain lock_page() */
-               folio_unlock(folio);
-               return;
-       }
-
-       /* Have writers, use proper subpage helper to end it */
-       btrfs_folio_end_writer_lock(fs_info, folio, start, len);
-}
-
 /*
  * This is for folio already locked by plain lock_page()/folio_lock(), which
  * doesn't have any subpage awareness.
index f90e0c4f4cabd27e361b8f0c970b22f0def4f72f..f805261e0999c3a84abe7aea0090a57b38dd83c7 100644 (file)
@@ -155,8 +155,6 @@ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
 
 void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
                                  struct folio *folio, u64 start, u32 len);
-void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info,
-                              struct folio *folio, u64 start, u32 len);
 void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
                                    struct folio *folio,
                                    unsigned long *ret_bitmap);
This page took 0.067896 seconds and 4 git commands to generate.