btrfs: convert try_release_extent_buffer() to take a folio
author Li Zetao <[email protected]>
Wed, 28 Aug 2024 18:28:58 +0000 (02:28 +0800)
committer David Sterba <[email protected]>
Tue, 10 Sep 2024 14:51:20 +0000 (16:51 +0200)
The old page API is being gradually replaced and converted to use folios,
which improves code readability and avoids repeated conversions between
page and folio.

Signed-off-by: Li Zetao <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
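
The pattern is the usual one for these conversions: push the folio down the
call chain so the helper no longer takes a page only to look up its folio,
and callers that already hold a folio stop bouncing through &folio->page.
A minimal, self-contained sketch of that before/after shape follows; it uses
hypothetical stand-in types purely for illustration, not the real kernel
struct page/struct folio or page_folio() helpers.

	/*
	 * Illustrative sketch only (not kernel code).  The stand-in types
	 * below mimic the relationship: every page belongs to a folio.
	 */
	#include <stdio.h>

	struct folio { int index; };            /* stand-in for the kernel type */
	struct page  { struct folio *folio; };  /* each page points at its folio */

	/* Before: the helper took a page and immediately converted it. */
	static int release_buffer_old(struct page *page)
	{
		struct folio *folio = page->folio;  /* page_folio(page) in the kernel */
		return folio->index >= 0;
	}

	/* After: the helper takes the folio directly, so a caller that already
	 * holds a folio passes it straight through with no conversion. */
	static int release_buffer_new(struct folio *folio)
	{
		return folio->index >= 0;
	}

	int main(void)
	{
		struct folio f = { .index = 1 };
		struct page  p = { .folio = &f };

		printf("old: %d\n", release_buffer_old(&p));  /* caller holds a page  */
		printf("new: %d\n", release_buffer_new(&f));  /* caller holds a folio */
		return 0;
	}

In the commit itself this is exactly what happens to btree_release_folio():
it already has a folio, so the &folio->page argument disappears, and
try_release_extent_buffer() drops its internal page_folio() lookup.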
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 612460e07b2e74689b1593ef598003b2dabd2710..25d768e67e37248ecf742bf4cc64ba4581dbd68f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -525,7 +525,7 @@ static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
        if (folio_test_writeback(folio) || folio_test_dirty(folio))
                return false;
 
-       return try_release_extent_buffer(&folio->page);
+       return try_release_extent_buffer(folio);
 }
 
 static void btree_invalidate_folio(struct folio *folio, size_t offset,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f5508cfb36d9cc0076bd7b6a11a1f2d375c4fe32..f8b001053d0591a7d3e81315a76c20de46a38b5f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4147,21 +4147,20 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
 
 }
 
-int try_release_extent_buffer(struct page *page)
+int try_release_extent_buffer(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct extent_buffer *eb;
 
-       if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
-               return try_release_subpage_extent_buffer(page_folio(page));
+       if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
+               return try_release_subpage_extent_buffer(folio);
 
        /*
         * We need to make sure nobody is changing folio private, as we rely on
         * folio private as the pointer to extent buffer.
         */
-       spin_lock(&page->mapping->i_private_lock);
+       spin_lock(&folio->mapping->i_private_lock);
        if (!folio_test_private(folio)) {
-               spin_unlock(&page->mapping->i_private_lock);
+               spin_unlock(&folio->mapping->i_private_lock);
                return 1;
        }
 
@@ -4176,10 +4175,10 @@ int try_release_extent_buffer(struct page *page)
        spin_lock(&eb->refs_lock);
        if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
                spin_unlock(&eb->refs_lock);
-               spin_unlock(&page->mapping->i_private_lock);
+               spin_unlock(&folio->mapping->i_private_lock);
                return 0;
        }
-       spin_unlock(&page->mapping->i_private_lock);
+       spin_unlock(&folio->mapping->i_private_lock);
 
        /*
         * If tree ref isn't set then we know the ref on this eb is a real ref,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1d9b30021109b9a8c728f662883d9c74385c8754..345774c84c4bcade791ad5fa9122384c62036868 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -237,7 +237,7 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
 }
 
 bool try_release_extent_mapping(struct page *page, gfp_t mask);
-int try_release_extent_buffer(struct page *page);
+int try_release_extent_buffer(struct folio *folio);
 
 int btrfs_read_folio(struct file *file, struct folio *folio);
 void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,