btrfs: more efficient chunk map iteration when device replace finishes
author Filipe Manana <[email protected]>
Thu, 25 Jul 2024 10:48:10 +0000 (11:48 +0100)
committer David Sterba <[email protected]>
Tue, 10 Sep 2024 14:51:17 +0000 (16:51 +0200)
When iterating the chunk maps after a device replace finishes, we are doing
a full rbtree search for each chunk map, which is not the most efficient
thing to do, wasting CPU time. As we are holding a write lock on the tree
during the whole iteration, we can simply start from the first node in the
tree and then move to the next chunk map by doing an rb_next() call - the
only exception is when we need to reschedule, in which case we have to do
a full rbtree search since we dropped the write lock and the tree may have
changed (chunk maps may have been removed and the tree may have been rebalanced).
So just do that.

Signed-off-by: Filipe Manana <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
fs/btrfs/dev-replace.c

index 20cf5e95f2bcd2e2ed6f892c6bc804299c8c1ff5..83d5cdd77f293e17f2375357c5a95859afc1640d 100644
@@ -824,8 +824,7 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
                                                struct btrfs_device *srcdev,
                                                struct btrfs_device *tgtdev)
 {
-       u64 start = 0;
-       int i;
+       struct rb_node *node;
 
        /*
         * The chunk mutex must be held so that no new chunks can be created
@@ -836,19 +835,34 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
        lockdep_assert_held(&fs_info->chunk_mutex);
 
        write_lock(&fs_info->mapping_tree_lock);
-       do {
+       node = rb_first_cached(&fs_info->mapping_tree);
+       while (node) {
+               struct rb_node *next = rb_next(node);
                struct btrfs_chunk_map *map;
+               u64 next_start;
 
-               map = btrfs_find_chunk_map_nolock(fs_info, start, U64_MAX);
-               if (!map)
-                       break;
-               for (i = 0; i < map->num_stripes; i++)
+               map = rb_entry(node, struct btrfs_chunk_map, rb_node);
+               next_start = map->start + map->chunk_len;
+
+               for (int i = 0; i < map->num_stripes; i++)
                        if (srcdev == map->stripes[i].dev)
                                map->stripes[i].dev = tgtdev;
-               start = map->start + map->chunk_len;
-               btrfs_free_chunk_map(map);
-               cond_resched_rwlock_write(&fs_info->mapping_tree_lock);
-       } while (start);
+
+               if (cond_resched_rwlock_write(&fs_info->mapping_tree_lock)) {
+                       map = btrfs_find_chunk_map_nolock(fs_info, next_start, U64_MAX);
+                       if (!map)
+                               break;
+                       node = &map->rb_node;
+                       /*
+                        * Drop the lookup reference since we are holding the
+                        * lock in write mode and no one can remove the chunk
+                        * map from the tree and drop its tree reference.
+                        */
+                       btrfs_free_chunk_map(map);
+               } else {
+                       node = next;
+               }
+       }
        write_unlock(&fs_info->mapping_tree_lock);
 }
 
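For reference, below is a minimal standalone sketch of the iteration pattern used by the patch, assuming a tree of non-overlapping [start, start + len) ranges ordered by start. The names demo_node, demo_lookup() and demo_iterate() are hypothetical and not part of btrfs; they only illustrate walking an rb_root_cached with rb_first_cached()/rb_next() under a write lock and restarting with a range lookup after cond_resched_rwlock_write() drops the lock.

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct demo_node {
	struct rb_node rb_node;
	u64 start;
	u64 len;
};

/* Leftmost entry whose range ends after @start (tree ordered by start). */
static struct demo_node *demo_lookup(struct rb_root_cached *tree, u64 start)
{
	struct rb_node *node = tree->rb_root.rb_node;
	struct demo_node *found = NULL;

	while (node) {
		struct demo_node *entry = rb_entry(node, struct demo_node, rb_node);

		if (entry->start + entry->len <= start) {
			node = node->rb_right;
		} else {
			/* Candidate; keep searching for an earlier one. */
			found = entry;
			node = node->rb_left;
		}
	}
	return found;
}

static void demo_iterate(struct rb_root_cached *tree, rwlock_t *lock)
{
	struct rb_node *node;

	write_lock(lock);
	node = rb_first_cached(tree);
	while (node) {
		struct rb_node *next = rb_next(node);
		struct demo_node *entry;
		u64 next_start;

		entry = rb_entry(node, struct demo_node, rb_node);
		next_start = entry->start + entry->len;

		/* ... update @entry here ... */

		if (cond_resched_rwlock_write(lock)) {
			/*
			 * The write lock was dropped and reacquired, so the
			 * tree may have changed and @next may be stale: fall
			 * back to a full lookup for the next range.
			 */
			entry = demo_lookup(tree, next_start);
			if (!entry)
				break;
			node = &entry->rb_node;
		} else {
			node = next;
		}
	}
	write_unlock(lock);
}

Note that, unlike btrfs_find_chunk_map_nolock(), the hypothetical demo_lookup() takes no extra reference on the returned entry, so the reference-dropping step from the patch (the btrfs_free_chunk_map() call after the lookup) has no equivalent here.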