if (!down_read_trylock(&inode->i_mmap_lock))
return 0;
- write_lock(&tree->lock);
+ /*
+ * We want to be fast because we can be called from any path trying to
+ * allocate memory, so if the lock is busy we don't want to spend time
+ * waiting for it - either some task is about to do IO for the inode or
+ * we may have another task shrinking extent maps, here in this code, so
+ * skip this inode.
+ */
+ if (!write_trylock(&tree->lock)) {
+ up_read(&inode->i_mmap_lock);
+ return 0;
+ }
+
- node = rb_first_cached(&tree->map);
+ node = rb_first(&tree->root);
while (node) {
+ struct rb_node *next = rb_next(node);
struct extent_map *em;
em = rb_entry(node, struct extent_map, rb_node);
- node = rb_next(node);
- (*scanned)++;
+ ctx->scanned++;
if (em->flags & EXTENT_FLAG_PINNED)
goto next;
[...]
next:
if (ctx->scanned >= ctx->nr_to_scan)
break;
/*
- * Restart if we had to reschedule, and any extent maps that were
- * pinned before may have become unpinned after we released the
- * lock and took it again.
+ * Stop if we need to reschedule or there's contention on the
+ * lock. This is to avoid slowing other tasks trying to take the
+ * lock and because the shrinker might be called during a memory
+ * allocation path and we want to avoid taking a very long time
+ * and slowing down all sorts of tasks.
*/
- if (cond_resched_rwlock_write(&tree->lock))
- node = rb_first(&tree->root);
- else
- node = next;
+ if (need_resched() || rwlock_needbreak(&tree->lock))
+ break;
+ node = next;
}
write_unlock(&tree->lock);
up_read(&inode->i_mmap_lock);
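
The two new comments in this hunk describe a single pattern: take the lock only if it is immediately free, and give it up as soon as another task wants it or a reschedule is due. What follows is a minimal sketch of that pattern under the same kernel primitives (write_trylock(), need_resched(), rwlock_needbreak()); scan_tree() and struct scan_ctx are hypothetical names, not the btrfs code.

/*
 * Minimal sketch of the trylock-enter / needbreak-exit scan pattern.
 * scan_tree() and struct scan_ctx are hypothetical names.
 */
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct scan_ctx {
	long nr_to_scan;	/* scan budget, like ctx->nr_to_scan */
	long scanned;		/* entries visited, like ctx->scanned */
};

static void scan_tree(rwlock_t *lock, struct rb_root *root,
		      struct scan_ctx *ctx)
{
	struct rb_node *node;

	/* A busy lock means the tree is in use; skipping beats blocking. */
	if (!write_trylock(lock))
		return;

	node = rb_first(root);
	while (node) {
		/* Save the successor first so unlinking node stays safe. */
		struct rb_node *next = rb_next(node);

		ctx->scanned++;

		/* ... inspect, and possibly unlink, the current entry ... */

		if (ctx->scanned >= ctx->nr_to_scan)
			break;
		/* Back off instead of making lock waiters spin. */
		if (need_resched() || rwlock_needbreak(lock))
			break;
		node = next;
	}
	write_unlock(lock);
}

A skipped inode is simply revisited on a later shrinker run, which is acceptable because shrinking is best effort; blocking a memory allocation path on a contended lock would be worse than reclaiming nothing.
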
args.ino = ino;
args.root = root;
- inode = iget5_locked_rcu(s, hashval, btrfs_find_actor,
- inode = iget5_locked(root->fs_info->sb, hashval, btrfs_find_actor,
+ inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
btrfs_init_locked_inode,
(void *)&args);
return inode;
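
The final hunk appears to resolve two independent changes at once: btrfs deriving the super block from root->fs_info->sb instead of a separate parameter, and the switch to the VFS iget5_locked_rcu() variant. Below is a minimal sketch of how such a lookup is wired, with hypothetical myfs_* names. Per the VFS documentation, the test callback passed to iget5_locked_rcu() must tolerate inodes that are unstable or mid-teardown, so it should only compare fields and must not block.

/*
 * Minimal sketch of an iget5_locked_rcu() lookup; all myfs_* names
 * are hypothetical, not from btrfs.
 */
#include <linux/err.h>
#include <linux/fs.h>

struct myfs_iget_args {
	u64 ino;
};

static int myfs_find_actor(struct inode *inode, void *opaque)
{
	struct myfs_iget_args *args = opaque;

	/* May run under RCU: compare fields only, never block. */
	return inode->i_ino == args->ino;
}

static int myfs_init_locked_inode(struct inode *inode, void *opaque)
{
	struct myfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	return 0;
}

static struct inode *myfs_iget(struct super_block *sb, u64 ino)
{
	struct myfs_iget_args args = { .ino = ino };
	struct inode *inode;

	inode = iget5_locked_rcu(sb, ino, myfs_find_actor,
				 myfs_init_locked_inode, &args);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* Freshly allocated inode: finish setup, then publish it. */
		unlock_new_inode(inode);
	}
	return inode;
}

When the returned inode has I_NEW set it is freshly allocated, and the caller must finish initializing it before calling unlock_new_inode(), which clears I_NEW and makes the inode visible to concurrent lookups.
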