1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2011 STRATO. All rights reserved.
7 #include <linux/rbtree.h>
8 #include <trace/events/btrfs.h>
13 #include "transaction.h"
14 #include "delayed-ref.h"
17 #include "tree-mod-log.h"
19 #include "accessors.h"
20 #include "extent-tree.h"
21 #include "relocation.h"
22 #include "tree-checker.h"
24 /* Just arbitrary numbers so we can be sure one of these happened. */
25 #define BACKREF_FOUND_SHARED 6
26 #define BACKREF_FOUND_NOT_SHARED 7
28 struct extent_inode_elem {
32 struct extent_inode_elem *next;
35 static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
36 const struct btrfs_key *key,
37 const struct extent_buffer *eb,
38 const struct btrfs_file_extent_item *fi,
39 struct extent_inode_elem **eie)
41 const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
42 u64 offset = key->offset;
43 struct extent_inode_elem *e;
48 if (!btrfs_file_extent_compression(eb, fi) &&
49 !btrfs_file_extent_encryption(eb, fi) &&
50 !btrfs_file_extent_other_encoding(eb, fi)) {
53 data_offset = btrfs_file_extent_offset(eb, fi);
55 if (ctx->extent_item_pos < data_offset ||
56 ctx->extent_item_pos >= data_offset + data_len)
58 offset += ctx->extent_item_pos - data_offset;
61 if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
64 cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
69 for (int i = 0; i < root_count; i++) {
72 ret = ctx->indirect_ref_iterator(key->objectid, offset,
73 data_len, root_ids[i],
80 e = kmalloc(sizeof(*e), GFP_NOFS);
85 e->inum = key->objectid;
87 e->num_bytes = data_len;
93 static void free_inode_elem_list(struct extent_inode_elem *eie)
95 struct extent_inode_elem *eie_next;
97 for (; eie; eie = eie_next) {
103 static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
104 const struct extent_buffer *eb,
105 struct extent_inode_elem **eie)
108 struct btrfs_key key;
109 struct btrfs_file_extent_item *fi;
 * From the shared data ref, we only have the leaf but we need
 * the key. Thus, we must look into all items and find one (or
 * more) with a reference to our extent item.
120 nritems = btrfs_header_nritems(eb);
121 for (slot = 0; slot < nritems; ++slot) {
122 btrfs_item_key_to_cpu(eb, &key, slot);
123 if (key.type != BTRFS_EXTENT_DATA_KEY)
125 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
126 extent_type = btrfs_file_extent_type(eb, fi);
127 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
129 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
130 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
131 if (disk_byte != ctx->bytenr)
134 ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
135 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
143 struct rb_root_cached root;
147 #define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }
150 struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
151 struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
152 struct preftree indirect_missing_keys;
156 * Checks for a shared extent during backref search.
158 * The share_count tracks prelim_refs (direct and indirect) having a
160 * - incremented when a ref->count transitions to >0
161 * - decremented when a ref->count transitions to <1
164 struct btrfs_backref_share_check_ctx *ctx;
165 struct btrfs_root *root;
170 * Counts number of inodes that refer to an extent (different inodes in
171 * the same root or different roots) that we could find. The sharedness
172 * check typically stops once this counter gets greater than 1, so it
173 * may not reflect the total number of inodes.
 * The number of times we found our inode refers to the data extent whose
 * sharedness we are determining. In other words, how many file extent
 * items we could find for our inode that point to our target data
 * extent. The value we get here after finishing the extent sharedness
 * check may be smaller than reality, but if it ends up being greater
 * than 1, then we know for sure the inode has multiple file extent
 * items that point to our data extent, and we can safely assume it's
 * useful to cache the sharedness check result.
187 bool have_delayed_delete_refs;
190 static inline int extent_is_shared(struct share_check *sc)
192 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
195 static struct kmem_cache *btrfs_prelim_ref_cache;
197 int __init btrfs_prelim_ref_init(void)
199 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
200 sizeof(struct prelim_ref),
204 if (!btrfs_prelim_ref_cache)
209 void __cold btrfs_prelim_ref_exit(void)
211 kmem_cache_destroy(btrfs_prelim_ref_cache);
214 static void free_pref(struct prelim_ref *ref)
216 kmem_cache_free(btrfs_prelim_ref_cache, ref);
220 * Return 0 when both refs are for the same block (and can be merged).
221 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
222 * indicates a 'higher' block.
224 static int prelim_ref_compare(struct prelim_ref *ref1,
225 struct prelim_ref *ref2)
227 if (ref1->level < ref2->level)
229 if (ref1->level > ref2->level)
231 if (ref1->root_id < ref2->root_id)
233 if (ref1->root_id > ref2->root_id)
235 if (ref1->key_for_search.type < ref2->key_for_search.type)
237 if (ref1->key_for_search.type > ref2->key_for_search.type)
239 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
241 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
243 if (ref1->key_for_search.offset < ref2->key_for_search.offset)
245 if (ref1->key_for_search.offset > ref2->key_for_search.offset)
247 if (ref1->parent < ref2->parent)
249 if (ref1->parent > ref2->parent)
255 static void update_share_count(struct share_check *sc, int oldcount,
256 int newcount, struct prelim_ref *newref)
258 if ((!sc) || (oldcount == 0 && newcount < 1))
261 if (oldcount > 0 && newcount < 1)
263 else if (oldcount < 1 && newcount > 0)
266 if (newref->root_id == sc->root->root_key.objectid &&
267 newref->wanted_disk_byte == sc->data_bytenr &&
268 newref->key_for_search.objectid == sc->inum)
269 sc->self_ref_count += newref->count;
273 * Add @newref to the @root rbtree, merging identical refs.
275 * Callers should assume that newref has been freed after calling.
277 static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
278 struct preftree *preftree,
279 struct prelim_ref *newref,
280 struct share_check *sc)
282 struct rb_root_cached *root;
284 struct rb_node *parent = NULL;
285 struct prelim_ref *ref;
287 bool leftmost = true;
289 root = &preftree->root;
290 p = &root->rb_root.rb_node;
294 ref = rb_entry(parent, struct prelim_ref, rbnode);
295 result = prelim_ref_compare(ref, newref);
298 } else if (result > 0) {
302 /* Identical refs, merge them and free @newref */
303 struct extent_inode_elem *eie = ref->inode_list;
305 while (eie && eie->next)
309 ref->inode_list = newref->inode_list;
311 eie->next = newref->inode_list;
312 trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
315 * A delayed ref can have newref->count < 0.
316 * The ref->count is updated to follow any
317 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
319 update_share_count(sc, ref->count,
320 ref->count + newref->count, newref);
321 ref->count += newref->count;
327 update_share_count(sc, 0, newref->count, newref);
329 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
330 rb_link_node(&newref->rbnode, parent, p);
331 rb_insert_color_cached(&newref->rbnode, root, leftmost);
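/*
 * Illustrative usage sketch (mirroring add_prelim_ref() below, not a new
 * API): once a ref has been handed to prelim_ref_insert() it may have been
 * merged into an existing node and freed, so the caller must not touch it
 * again:
 *
 *	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 *	if (!ref)
 *		return -ENOMEM;
 *	ref->root_id = root_id;
 *	...
 *	prelim_ref_insert(fs_info, preftree, ref, sc);
 *	(from here on, ref must be considered freed)
 */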
335 * Release the entire tree. We don't care about internal consistency so
336 * just free everything and then reset the tree root.
338 static void prelim_release(struct preftree *preftree)
340 struct prelim_ref *ref, *next_ref;
342 rbtree_postorder_for_each_entry_safe(ref, next_ref,
343 &preftree->root.rb_root, rbnode) {
344 free_inode_elem_list(ref->inode_list);
348 preftree->root = RB_ROOT_CACHED;
353 * the rules for all callers of this function are:
354 * - obtaining the parent is the goal
355 * - if you add a key, you must know that it is a correct key
356 * - if you cannot add the parent or a correct key, then we will look into the
357 * block later to set a correct key
361 * backref type | shared | indirect | shared | indirect
362 * information | tree | tree | data | data
363 * --------------------+--------+----------+--------+----------
364 * parent logical | y | - | - | -
365 * key to resolve | - | y | y | y
366 * tree block logical | - | - | - | -
367 * root for resolving | y | y | y | y
 * - column 1: we have the parent -> done
370 * - column 2, 3, 4: we use the key to find the parent
372 * on disk refs (inline or keyed)
373 * ==============================
374 * backref type | shared | indirect | shared | indirect
375 * information | tree | tree | data | data
376 * --------------------+--------+----------+--------+----------
377 * parent logical | y | - | y | -
378 * key to resolve | - | - | - | y
379 * tree block logical | y | y | y | y
380 * root for resolving | - | y | y | y
 * - column 1, 3: we have the parent -> done
383 * - column 2: we take the first key from the block to find the parent
384 * (see add_missing_keys)
385 * - column 4: we use the key to find the parent
387 * additional information that's available but not required to find the parent
388 * block might help in merging entries to gain some speed.
390 static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
391 struct preftree *preftree, u64 root_id,
392 const struct btrfs_key *key, int level, u64 parent,
393 u64 wanted_disk_byte, int count,
394 struct share_check *sc, gfp_t gfp_mask)
396 struct prelim_ref *ref;
398 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
401 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
405 ref->root_id = root_id;
407 ref->key_for_search = *key;
409 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
411 ref->inode_list = NULL;
414 ref->parent = parent;
415 ref->wanted_disk_byte = wanted_disk_byte;
416 prelim_ref_insert(fs_info, preftree, ref, sc);
417 return extent_is_shared(sc);
420 /* direct refs use root == 0, key == NULL */
421 static int add_direct_ref(const struct btrfs_fs_info *fs_info,
422 struct preftrees *preftrees, int level, u64 parent,
423 u64 wanted_disk_byte, int count,
424 struct share_check *sc, gfp_t gfp_mask)
426 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
427 parent, wanted_disk_byte, count, sc, gfp_mask);
430 /* indirect refs use parent == 0 */
431 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
432 struct preftrees *preftrees, u64 root_id,
433 const struct btrfs_key *key, int level,
434 u64 wanted_disk_byte, int count,
435 struct share_check *sc, gfp_t gfp_mask)
437 struct preftree *tree = &preftrees->indirect;
440 tree = &preftrees->indirect_missing_keys;
441 return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
442 wanted_disk_byte, count, sc, gfp_mask);
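/*
 * Illustrative summary (sketch) of how the on-disk backref item types map
 * onto the two helpers above, as done by add_inline_refs(),
 * add_keyed_refs() and add_delayed_refs() further below:
 *
 *	BTRFS_SHARED_BLOCK_REF_KEY -> add_direct_ref(level + 1, parent, ...)
 *	BTRFS_SHARED_DATA_REF_KEY  -> add_direct_ref(0, parent, ...)
 *	BTRFS_TREE_BLOCK_REF_KEY   -> add_indirect_ref(root, NULL, level + 1, ...)
 *	BTRFS_EXTENT_DATA_REF_KEY  -> add_indirect_ref(root, &key, 0, ...)
 */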
445 static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
447 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
448 struct rb_node *parent = NULL;
449 struct prelim_ref *ref = NULL;
450 struct prelim_ref target = {};
453 target.parent = bytenr;
457 ref = rb_entry(parent, struct prelim_ref, rbnode);
458 result = prelim_ref_compare(ref, &target);
470 static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
471 struct btrfs_root *root, struct btrfs_path *path,
472 struct ulist *parents,
473 struct preftrees *preftrees, struct prelim_ref *ref,
478 struct extent_buffer *eb;
479 struct btrfs_key key;
480 struct btrfs_key *key_for_search = &ref->key_for_search;
481 struct btrfs_file_extent_item *fi;
482 struct extent_inode_elem *eie = NULL, *old = NULL;
484 u64 wanted_disk_byte = ref->wanted_disk_byte;
490 eb = path->nodes[level];
491 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
498 * 1. We normally enter this function with the path already pointing to
499 * the first item to check. But sometimes, we may enter it with
 * 2. We are searching for a normal backref but the bytenr of this
 * leaf matches a shared data backref.
 * 3. The leaf owner is not equal to the root we are searching for.
505 * For these cases, go to the next leaf before we continue.
508 if (path->slots[0] >= btrfs_header_nritems(eb) ||
509 is_shared_data_backref(preftrees, eb->start) ||
510 ref->root_id != btrfs_header_owner(eb)) {
511 if (ctx->time_seq == BTRFS_SEQ_LAST)
512 ret = btrfs_next_leaf(root, path);
514 ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
517 while (!ret && count < ref->count) {
519 slot = path->slots[0];
521 btrfs_item_key_to_cpu(eb, &key, slot);
523 if (key.objectid != key_for_search->objectid ||
524 key.type != BTRFS_EXTENT_DATA_KEY)
 * We are searching for a normal backref but the bytenr of this leaf
 * matches a shared data backref, OR
 * the leaf owner is not equal to the root we are searching for.
533 (is_shared_data_backref(preftrees, eb->start) ||
534 ref->root_id != btrfs_header_owner(eb))) {
535 if (ctx->time_seq == BTRFS_SEQ_LAST)
536 ret = btrfs_next_leaf(root, path);
538 ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
541 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
542 type = btrfs_file_extent_type(eb, fi);
543 if (type == BTRFS_FILE_EXTENT_INLINE)
545 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
546 data_offset = btrfs_file_extent_offset(eb, fi);
548 if (disk_byte == wanted_disk_byte) {
551 if (ref->key_for_search.offset == key.offset - data_offset)
555 if (!ctx->ignore_extent_item_pos) {
556 ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
557 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
563 ret = ulist_add_merge_ptr(parents, eb->start,
564 eie, (void **)&old, GFP_NOFS);
567 if (!ret && !ctx->ignore_extent_item_pos) {
575 if (ctx->time_seq == BTRFS_SEQ_LAST)
576 ret = btrfs_next_item(root, path);
578 ret = btrfs_next_old_item(root, path, ctx->time_seq);
581 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
582 free_inode_elem_list(eie);
590 * resolve an indirect backref in the form (root_id, key, level)
591 * to a logical address
593 static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
594 struct btrfs_path *path,
595 struct preftrees *preftrees,
596 struct prelim_ref *ref, struct ulist *parents)
598 struct btrfs_root *root;
599 struct extent_buffer *eb;
602 int level = ref->level;
603 struct btrfs_key search_key = ref->key_for_search;
606 * If we're search_commit_root we could possibly be holding locks on
607 * other tree nodes. This happens when qgroups does backref walks when
608 * adding new delayed refs. To deal with this we need to look in cache
609 * for the root, and if we don't find it then we need to search the
610 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
613 if (path->search_commit_root)
614 root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
616 root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
622 if (!path->search_commit_root &&
623 test_bit(BTRFS_ROOT_DELETING, &root->state)) {
628 if (btrfs_is_testing(ctx->fs_info)) {
633 if (path->search_commit_root)
634 root_level = btrfs_header_level(root->commit_root);
635 else if (ctx->time_seq == BTRFS_SEQ_LAST)
636 root_level = btrfs_header_level(root->node);
638 root_level = btrfs_old_root_level(root, ctx->time_seq);
640 if (root_level + 1 == level)
644 * We can often find data backrefs with an offset that is too large
645 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
 * subtracting the data offset of its corresponding extent data item
 * from a file's offset. This can happen for example through the clone ioctl.
 * So if we detect such a case, we set the search key's offset to zero to
651 * make sure we will find the matching file extent item at
652 * add_all_parents(), otherwise we will miss it because the offset
 * taken from the backref is much larger than the offset of the file
654 * extent item. This can make us scan a very large number of file
655 * extent items, but at least it will not make us miss any.
657 * This is an ugly workaround for a behaviour that should have never
658 * existed, but it does and a fix for the clone ioctl would touch a lot
659 * of places, cause backwards incompatibility and would not fix the
660 * problem for extents cloned with older kernels.
662 if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
663 search_key.offset >= LLONG_MAX)
664 search_key.offset = 0;
665 path->lowest_level = level;
666 if (ctx->time_seq == BTRFS_SEQ_LAST)
667 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
669 ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
671 btrfs_debug(ctx->fs_info,
672 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
673 ref->root_id, level, ref->count, ret,
674 ref->key_for_search.objectid, ref->key_for_search.type,
675 ref->key_for_search.offset);
679 eb = path->nodes[level];
681 if (WARN_ON(!level)) {
686 eb = path->nodes[level];
689 ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
691 btrfs_put_root(root);
693 path->lowest_level = 0;
694 btrfs_release_path(path);
698 static struct extent_inode_elem *
699 unode_aux_to_inode_list(struct ulist_node *node)
703 return (struct extent_inode_elem *)(uintptr_t)node->aux;
706 static void free_leaf_list(struct ulist *ulist)
708 struct ulist_node *node;
709 struct ulist_iterator uiter;
711 ULIST_ITER_INIT(&uiter);
712 while ((node = ulist_next(ulist, &uiter)))
713 free_inode_elem_list(unode_aux_to_inode_list(node));
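/*
 * Sketch of the aux convention used with ulists here: inode lists are
 * attached to ulist nodes by stashing the pointer in the u64 aux field,
 * e.g. in add_all_parents() above:
 *
 *	ret = ulist_add_merge_ptr(parents, eb->start, eie,
 *				  (void **)&old, GFP_NOFS);
 *
 * unode_aux_to_inode_list() performs the inverse cast when consuming the
 * ulist, and free_leaf_list() frees the attached lists along with it.
 */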
719 * We maintain three separate rbtrees: one for direct refs, one for
720 * indirect refs which have a key, and one for indirect refs which do not
721 * have a key. Each tree does merge on insertion.
723 * Once all of the references are located, we iterate over the tree of
724 * indirect refs with missing keys. An appropriate key is located and
725 * the ref is moved onto the tree for indirect refs. After all missing
726 * keys are thus located, we iterate over the indirect ref tree, resolve
727 * each reference, and then insert the resolved reference onto the
728 * direct tree (merging there too).
730 * New backrefs (i.e., for parent nodes) are added to the appropriate
731 * rbtree as they are encountered. The new backrefs are subsequently
734 static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
735 struct btrfs_path *path,
736 struct preftrees *preftrees,
737 struct share_check *sc)
741 struct ulist *parents;
742 struct ulist_node *node;
743 struct ulist_iterator uiter;
744 struct rb_node *rnode;
746 parents = ulist_alloc(GFP_NOFS);
751 * We could trade memory usage for performance here by iterating
752 * the tree, allocating new refs for each insertion, and then
753 * freeing the entire indirect tree when we're done. In some test
754 * cases, the tree can grow quite large (~200k objects).
756 while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
757 struct prelim_ref *ref;
759 ref = rb_entry(rnode, struct prelim_ref, rbnode);
760 if (WARN(ref->parent,
761 "BUG: direct ref found in indirect tree")) {
766 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
767 preftrees->indirect.count--;
769 if (ref->count == 0) {
774 if (sc && ref->root_id != sc->root->root_key.objectid) {
776 ret = BACKREF_FOUND_SHARED;
779 err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
 * We can only tolerate -ENOENT; otherwise, we should catch the error
 * and return directly.
784 if (err == -ENOENT) {
785 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
794 /* we put the first parent into the ref at hand */
795 ULIST_ITER_INIT(&uiter);
796 node = ulist_next(parents, &uiter);
797 ref->parent = node ? node->val : 0;
798 ref->inode_list = unode_aux_to_inode_list(node);
800 /* Add a prelim_ref(s) for any other parent(s). */
801 while ((node = ulist_next(parents, &uiter))) {
802 struct prelim_ref *new_ref;
804 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
811 memcpy(new_ref, ref, sizeof(*ref));
812 new_ref->parent = node->val;
813 new_ref->inode_list = unode_aux_to_inode_list(node);
814 prelim_ref_insert(ctx->fs_info, &preftrees->direct,
819 * Now it's a direct ref, put it in the direct tree. We must
820 * do this last because the ref could be merged/freed here.
822 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
824 ulist_reinit(parents);
829 * We may have inode lists attached to refs in the parents ulist, so we
830 * must free them before freeing the ulist and its refs.
832 free_leaf_list(parents);
837 * read tree blocks and add keys where required.
839 static int add_missing_keys(struct btrfs_fs_info *fs_info,
840 struct preftrees *preftrees, bool lock)
842 struct prelim_ref *ref;
843 struct extent_buffer *eb;
844 struct preftree *tree = &preftrees->indirect_missing_keys;
845 struct rb_node *node;
847 while ((node = rb_first_cached(&tree->root))) {
848 struct btrfs_tree_parent_check check = { 0 };
850 ref = rb_entry(node, struct prelim_ref, rbnode);
851 rb_erase_cached(node, &tree->root);
853 BUG_ON(ref->parent); /* should not be a direct ref */
854 BUG_ON(ref->key_for_search.type);
855 BUG_ON(!ref->wanted_disk_byte);
857 check.level = ref->level - 1;
858 check.owner_root = ref->root_id;
860 eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
865 if (!extent_buffer_uptodate(eb)) {
867 free_extent_buffer(eb);
872 btrfs_tree_read_lock(eb);
873 if (btrfs_header_level(eb) == 0)
874 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
876 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
878 btrfs_tree_read_unlock(eb);
879 free_extent_buffer(eb);
880 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees.
890 static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
891 struct btrfs_delayed_ref_head *head, u64 seq,
892 struct preftrees *preftrees, struct share_check *sc)
894 struct btrfs_delayed_ref_node *node;
895 struct btrfs_key key;
900 spin_lock(&head->lock);
901 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
902 node = rb_entry(n, struct btrfs_delayed_ref_node,
907 switch (node->action) {
908 case BTRFS_ADD_DELAYED_EXTENT:
909 case BTRFS_UPDATE_DELAYED_HEAD:
912 case BTRFS_ADD_DELAYED_REF:
913 count = node->ref_mod;
915 case BTRFS_DROP_DELAYED_REF:
916 count = node->ref_mod * -1;
921 switch (node->type) {
922 case BTRFS_TREE_BLOCK_REF_KEY: {
923 /* NORMAL INDIRECT METADATA backref */
924 struct btrfs_delayed_tree_ref *ref;
925 struct btrfs_key *key_ptr = NULL;
927 if (head->extent_op && head->extent_op->update_key) {
928 btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
932 ref = btrfs_delayed_node_to_tree_ref(node);
933 ret = add_indirect_ref(fs_info, preftrees, ref->root,
934 key_ptr, ref->level + 1,
935 node->bytenr, count, sc,
939 case BTRFS_SHARED_BLOCK_REF_KEY: {
940 /* SHARED DIRECT METADATA backref */
941 struct btrfs_delayed_tree_ref *ref;
943 ref = btrfs_delayed_node_to_tree_ref(node);
945 ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
946 ref->parent, node->bytenr, count,
950 case BTRFS_EXTENT_DATA_REF_KEY: {
951 /* NORMAL INDIRECT DATA backref */
952 struct btrfs_delayed_data_ref *ref;
953 ref = btrfs_delayed_node_to_data_ref(node);
955 key.objectid = ref->objectid;
956 key.type = BTRFS_EXTENT_DATA_KEY;
957 key.offset = ref->offset;
960 * If we have a share check context and a reference for
961 * another inode, we can't exit immediately. This is
962 * because even if this is a BTRFS_ADD_DELAYED_REF
963 * reference we may find next a BTRFS_DROP_DELAYED_REF
964 * which cancels out this ADD reference.
966 * If this is a DROP reference and there was no previous
967 * ADD reference, then we need to signal that when we
968 * process references from the extent tree (through
969 * add_inline_refs() and add_keyed_refs()), we should
970 * not exit early if we find a reference for another
971 * inode, because one of the delayed DROP references
972 * may cancel that reference in the extent tree.
975 sc->have_delayed_delete_refs = true;
977 ret = add_indirect_ref(fs_info, preftrees, ref->root,
978 &key, 0, node->bytenr, count, sc,
982 case BTRFS_SHARED_DATA_REF_KEY: {
983 /* SHARED DIRECT FULL backref */
984 struct btrfs_delayed_data_ref *ref;
986 ref = btrfs_delayed_node_to_data_ref(node);
988 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
989 node->bytenr, count, sc,
997 * We must ignore BACKREF_FOUND_SHARED until all delayed
998 * refs have been checked.
1000 if (ret && (ret != BACKREF_FOUND_SHARED))
1004 ret = extent_is_shared(sc);
1006 spin_unlock(&head->lock);
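/*
 * Illustrative example (a sketch of the logic above) of why the signed
 * count matters: an extent with a pending BTRFS_ADD_DELAYED_REF (ref_mod 1)
 * and a pending BTRFS_DROP_DELAYED_REF (ref_mod 1) for the same (root, key)
 * yields two prelim refs with counts +1 and -1. prelim_ref_insert() merges
 * them into a single ref with count 0, which the final walk in
 * find_parent_nodes() then skips, so the cancelled reference is never
 * reported.
 */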
1011 * add all inline backrefs for bytenr to the list
1013 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1015 static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
1016 struct btrfs_path *path,
1017 int *info_level, struct preftrees *preftrees,
1018 struct share_check *sc)
1022 struct extent_buffer *leaf;
1023 struct btrfs_key key;
1024 struct btrfs_key found_key;
1027 struct btrfs_extent_item *ei;
1032 * enumerate all inline refs
1034 leaf = path->nodes[0];
1035 slot = path->slots[0];
1037 item_size = btrfs_item_size(leaf, slot);
1038 BUG_ON(item_size < sizeof(*ei));
1040 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1042 if (ctx->check_extent_item) {
1043 ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
1048 flags = btrfs_extent_flags(leaf, ei);
1049 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1051 ptr = (unsigned long)(ei + 1);
1052 end = (unsigned long)ei + item_size;
1054 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1055 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1056 struct btrfs_tree_block_info *info;
1058 info = (struct btrfs_tree_block_info *)ptr;
1059 *info_level = btrfs_tree_block_level(leaf, info);
1060 ptr += sizeof(struct btrfs_tree_block_info);
1062 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
1063 *info_level = found_key.offset;
1065 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1069 struct btrfs_extent_inline_ref *iref;
1073 iref = (struct btrfs_extent_inline_ref *)ptr;
1074 type = btrfs_get_extent_inline_ref_type(leaf, iref,
1075 BTRFS_REF_TYPE_ANY);
1076 if (type == BTRFS_REF_TYPE_INVALID)
1079 offset = btrfs_extent_inline_ref_offset(leaf, iref);
1082 case BTRFS_SHARED_BLOCK_REF_KEY:
1083 ret = add_direct_ref(ctx->fs_info, preftrees,
1084 *info_level + 1, offset,
1085 ctx->bytenr, 1, NULL, GFP_NOFS);
1087 case BTRFS_SHARED_DATA_REF_KEY: {
1088 struct btrfs_shared_data_ref *sdref;
1091 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1092 count = btrfs_shared_data_ref_count(leaf, sdref);
1094 ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
1095 ctx->bytenr, count, sc, GFP_NOFS);
1098 case BTRFS_TREE_BLOCK_REF_KEY:
1099 ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
1100 NULL, *info_level + 1,
1101 ctx->bytenr, 1, NULL, GFP_NOFS);
1103 case BTRFS_EXTENT_DATA_REF_KEY: {
1104 struct btrfs_extent_data_ref *dref;
1108 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1109 count = btrfs_extent_data_ref_count(leaf, dref);
1110 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1112 key.type = BTRFS_EXTENT_DATA_KEY;
1113 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1115 if (sc && key.objectid != sc->inum &&
1116 !sc->have_delayed_delete_refs) {
1117 ret = BACKREF_FOUND_SHARED;
1121 root = btrfs_extent_data_ref_root(leaf, dref);
1123 if (!ctx->skip_data_ref ||
1124 !ctx->skip_data_ref(root, key.objectid, key.offset,
1126 ret = add_indirect_ref(ctx->fs_info, preftrees,
1127 root, &key, 0, ctx->bytenr,
1128 count, sc, GFP_NOFS);
1136 ptr += btrfs_extent_inline_ref_size(type);
1143 * add all non-inline backrefs for bytenr to the list
1145 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1147 static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
1148 struct btrfs_root *extent_root,
1149 struct btrfs_path *path,
1150 int info_level, struct preftrees *preftrees,
1151 struct share_check *sc)
1153 struct btrfs_fs_info *fs_info = extent_root->fs_info;
1156 struct extent_buffer *leaf;
1157 struct btrfs_key key;
1160 ret = btrfs_next_item(extent_root, path);
1168 slot = path->slots[0];
1169 leaf = path->nodes[0];
1170 btrfs_item_key_to_cpu(leaf, &key, slot);
1172 if (key.objectid != ctx->bytenr)
1174 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1176 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1180 case BTRFS_SHARED_BLOCK_REF_KEY:
1181 /* SHARED DIRECT METADATA backref */
1182 ret = add_direct_ref(fs_info, preftrees,
1183 info_level + 1, key.offset,
1184 ctx->bytenr, 1, NULL, GFP_NOFS);
1186 case BTRFS_SHARED_DATA_REF_KEY: {
1187 /* SHARED DIRECT FULL backref */
1188 struct btrfs_shared_data_ref *sdref;
1191 sdref = btrfs_item_ptr(leaf, slot,
1192 struct btrfs_shared_data_ref);
1193 count = btrfs_shared_data_ref_count(leaf, sdref);
1194 ret = add_direct_ref(fs_info, preftrees, 0,
1195 key.offset, ctx->bytenr, count,
1199 case BTRFS_TREE_BLOCK_REF_KEY:
1200 /* NORMAL INDIRECT METADATA backref */
1201 ret = add_indirect_ref(fs_info, preftrees, key.offset,
1202 NULL, info_level + 1, ctx->bytenr,
1205 case BTRFS_EXTENT_DATA_REF_KEY: {
1206 /* NORMAL INDIRECT DATA backref */
1207 struct btrfs_extent_data_ref *dref;
1211 dref = btrfs_item_ptr(leaf, slot,
1212 struct btrfs_extent_data_ref);
1213 count = btrfs_extent_data_ref_count(leaf, dref);
1214 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1216 key.type = BTRFS_EXTENT_DATA_KEY;
1217 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1219 if (sc && key.objectid != sc->inum &&
1220 !sc->have_delayed_delete_refs) {
1221 ret = BACKREF_FOUND_SHARED;
1225 root = btrfs_extent_data_ref_root(leaf, dref);
1227 if (!ctx->skip_data_ref ||
1228 !ctx->skip_data_ref(root, key.objectid, key.offset,
1230 ret = add_indirect_ref(fs_info, preftrees, root,
1231 &key, 0, ctx->bytenr,
1232 count, sc, GFP_NOFS);
1247 * The caller has joined a transaction or is holding a read lock on the
1248 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1249 * snapshot field changing while updating or checking the cache.
1251 static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1252 struct btrfs_root *root,
1253 u64 bytenr, int level, bool *is_shared)
1255 const struct btrfs_fs_info *fs_info = root->fs_info;
1256 struct btrfs_backref_shared_cache_entry *entry;
1258 if (!current->journal_info)
1259 lockdep_assert_held(&fs_info->commit_root_sem);
1261 if (!ctx->use_path_cache)
1264 if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1268 * Level -1 is used for the data extent, which is not reliable to cache
1269 * because its reference count can increase or decrease without us
1270 * realizing. We cache results only for extent buffers that lead from
1271 * the root node down to the leaf with the file extent item.
1275 entry = &ctx->path_cache_entries[level];
1277 /* Unused cache entry or being used for some other extent buffer. */
1278 if (entry->bytenr != bytenr)
1282 * We cached a false result, but the last snapshot generation of the
1283 * root changed, so we now have a snapshot. Don't trust the result.
1285 if (!entry->is_shared &&
1286 entry->gen != btrfs_root_last_snapshot(&root->root_item))
1290 * If we cached a true result and the last generation used for dropping
1291 * a root changed, we can not trust the result, because the dropped root
1292 * could be a snapshot sharing this extent buffer.
1294 if (entry->is_shared &&
1295 entry->gen != btrfs_get_last_root_drop_gen(fs_info))
1298 *is_shared = entry->is_shared;
 * If the node at this level is shared, then all nodes below are also
1301 * shared. Currently some of the nodes below may be marked as not shared
1302 * because we have just switched from one leaf to another, and switched
1303 * also other nodes above the leaf and below the current level, so mark
1307 for (int i = 0; i < level; i++) {
1308 ctx->path_cache_entries[i].is_shared = true;
1309 ctx->path_cache_entries[i].gen = entry->gen;
1317 * The caller has joined a transaction or is holding a read lock on the
1318 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1319 * snapshot field changing while updating or checking the cache.
1321 static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1322 struct btrfs_root *root,
1323 u64 bytenr, int level, bool is_shared)
1325 const struct btrfs_fs_info *fs_info = root->fs_info;
1326 struct btrfs_backref_shared_cache_entry *entry;
1329 if (!current->journal_info)
1330 lockdep_assert_held(&fs_info->commit_root_sem);
1332 if (!ctx->use_path_cache)
1335 if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1339 * Level -1 is used for the data extent, which is not reliable to cache
1340 * because its reference count can increase or decrease without us
1341 * realizing. We cache results only for extent buffers that lead from
1342 * the root node down to the leaf with the file extent item.
1347 gen = btrfs_get_last_root_drop_gen(fs_info);
1349 gen = btrfs_root_last_snapshot(&root->root_item);
1351 entry = &ctx->path_cache_entries[level];
1352 entry->bytenr = bytenr;
1353 entry->is_shared = is_shared;
1357 * If we found an extent buffer is shared, set the cache result for all
1358 * extent buffers below it to true. As nodes in the path are COWed,
1359 * their sharedness is moved to their children, and if a leaf is COWed,
 * then the sharedness of a data extent becomes direct: the refcount of
 * the data extent is increased in the extent item in the extent tree.
1364 for (int i = 0; i < level; i++) {
1365 entry = &ctx->path_cache_entries[i];
1366 entry->is_shared = is_shared;
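/*
 * Illustrative layout (sketch) of the path cache used by the two helpers
 * above: one entry per level of the path from the fs tree root down to the
 * leaf holding the file extent item, each caching a (bytenr, gen,
 * is_shared) tuple for the extent buffer at that level:
 *
 *	ctx->path_cache_entries[0]           leaf
 *	ctx->path_cache_entries[1]           node above the leaf
 *	...
 *	ctx->path_cache_entries[root_level]  root node
 */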
 * This adds all existing backrefs (inline backrefs, keyed backrefs and
 * delayed refs) for the given bytenr to the refs list, merges duplicates
 * and resolves indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
1378 * @ctx: Backref walking context object, must be not NULL.
1379 * @sc: If !NULL, then immediately return BACKREF_FOUND_SHARED when a
1380 * shared extent is detected.
1382 * Otherwise this returns 0 for success and <0 for an error.
1384 * FIXME some caching might speed things up
1386 static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
1387 struct share_check *sc)
1389 struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
1390 struct btrfs_key key;
1391 struct btrfs_path *path;
1392 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1393 struct btrfs_delayed_ref_head *head;
1396 struct prelim_ref *ref;
1397 struct rb_node *node;
1398 struct extent_inode_elem *eie = NULL;
1399 struct preftrees preftrees = {
1400 .direct = PREFTREE_INIT,
1401 .indirect = PREFTREE_INIT,
1402 .indirect_missing_keys = PREFTREE_INIT
1405 /* Roots ulist is not needed when using a sharedness check context. */
1407 ASSERT(ctx->roots == NULL);
1409 key.objectid = ctx->bytenr;
1410 key.offset = (u64)-1;
1411 if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
1412 key.type = BTRFS_METADATA_ITEM_KEY;
1414 key.type = BTRFS_EXTENT_ITEM_KEY;
1416 path = btrfs_alloc_path();
1420 path->search_commit_root = 1;
1421 path->skip_locking = 1;
1424 if (ctx->time_seq == BTRFS_SEQ_LAST)
1425 path->skip_locking = 1;
1430 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1434 /* This shouldn't happen, indicates a bug or fs corruption. */
1440 if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
1441 ctx->time_seq != BTRFS_SEQ_LAST) {
1443 * We have a specific time_seq we care about and trans which
1444 * means we have the path lock, we need to grab the ref head and
1445 * lock it so we have a consistent view of the refs at the given
1448 delayed_refs = &ctx->trans->transaction->delayed_refs;
1449 spin_lock(&delayed_refs->lock);
1450 head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
1452 if (!mutex_trylock(&head->mutex)) {
1453 refcount_inc(&head->refs);
1454 spin_unlock(&delayed_refs->lock);
1456 btrfs_release_path(path);
1459 * Mutex was contended, block until it's
1460 * released and try again
1462 mutex_lock(&head->mutex);
1463 mutex_unlock(&head->mutex);
1464 btrfs_put_delayed_ref_head(head);
1467 spin_unlock(&delayed_refs->lock);
1468 ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
1470 mutex_unlock(&head->mutex);
1474 spin_unlock(&delayed_refs->lock);
1478 if (path->slots[0]) {
1479 struct extent_buffer *leaf;
1483 leaf = path->nodes[0];
1484 slot = path->slots[0];
1485 btrfs_item_key_to_cpu(leaf, &key, slot);
1486 if (key.objectid == ctx->bytenr &&
1487 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1488 key.type == BTRFS_METADATA_ITEM_KEY)) {
1489 ret = add_inline_refs(ctx, path, &info_level,
1493 ret = add_keyed_refs(ctx, root, path, info_level,
1501 * If we have a share context and we reached here, it means the extent
1502 * is not directly shared (no multiple reference items for it),
1503 * otherwise we would have exited earlier with a return value of
1504 * BACKREF_FOUND_SHARED after processing delayed references or while
1505 * processing inline or keyed references from the extent tree.
1506 * The extent may however be indirectly shared through shared subtrees
1507 * as a result from creating snapshots, so we determine below what is
1508 * its parent node, in case we are dealing with a metadata extent, or
1509 * what's the leaf (or leaves), from a fs tree, that has a file extent
1510 * item pointing to it in case we are dealing with a data extent.
1512 ASSERT(extent_is_shared(sc) == 0);
1515 * If we are here for a data extent and we have a share_check structure
1516 * it means the data extent is not directly shared (does not have
1517 * multiple reference items), so we have to check if a path in the fs
1518 * tree (going from the root node down to the leaf that has the file
1519 * extent item pointing to the data extent) is shared, that is, if any
1520 * of the extent buffers in the path is referenced by other trees.
1522 if (sc && ctx->bytenr == sc->data_bytenr) {
1524 * If our data extent is from a generation more recent than the
1525 * last generation used to snapshot the root, then we know that
1526 * it can not be shared through subtrees, so we can skip
1527 * resolving indirect references, there's no point in
1528 * determining the extent buffers for the path from the fs tree
1529 * root node down to the leaf that has the file extent item that
1530 * points to the data extent.
1532 if (sc->data_extent_gen >
1533 btrfs_root_last_snapshot(&sc->root->root_item)) {
1534 ret = BACKREF_FOUND_NOT_SHARED;
1539 * If we are only determining if a data extent is shared or not
1540 * and the corresponding file extent item is located in the same
1541 * leaf as the previous file extent item, we can skip resolving
1542 * indirect references for a data extent, since the fs tree path
1543 * is the same (same leaf, so same path). We skip as long as the
1544 * cached result for the leaf is valid and only if there's only
1545 * one file extent item pointing to the data extent, because in
1546 * the case of multiple file extent items, they may be located
1547 * in different leaves and therefore we have multiple paths.
1549 if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
1550 sc->self_ref_count == 1) {
1554 cached = lookup_backref_shared_cache(sc->ctx, sc->root,
1555 sc->ctx->curr_leaf_bytenr,
1559 ret = BACKREF_FOUND_SHARED;
1561 ret = BACKREF_FOUND_NOT_SHARED;
1567 btrfs_release_path(path);
1569 ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
1573 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1575 ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
1579 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1582 * This walks the tree of merged and resolved refs. Tree blocks are
1583 * read in as needed. Unique entries are added to the ulist, and
1584 * the list of found roots is updated.
1586 * We release the entire tree in one go before returning.
1588 node = rb_first_cached(&preftrees.direct.root);
1590 ref = rb_entry(node, struct prelim_ref, rbnode);
1591 node = rb_next(&ref->rbnode);
1593 * ref->count < 0 can happen here if there are delayed
1594 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1595 * prelim_ref_insert() relies on this when merging
1596 * identical refs to keep the overall count correct.
1597 * prelim_ref_insert() will merge only those refs
1598 * which compare identically. Any refs having
1599 * e.g. different offsets would not be merged,
1600 * and would retain their original ref->count < 0.
1602 if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
1603 /* no parent == root of tree */
1604 ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
1608 if (ref->count && ref->parent) {
1609 if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
1611 struct btrfs_tree_parent_check check = { 0 };
1612 struct extent_buffer *eb;
1614 check.level = ref->level;
1616 eb = read_tree_block(ctx->fs_info, ref->parent,
1622 if (!extent_buffer_uptodate(eb)) {
1623 free_extent_buffer(eb);
1628 if (!path->skip_locking)
1629 btrfs_tree_read_lock(eb);
1630 ret = find_extent_in_eb(ctx, eb, &eie);
1631 if (!path->skip_locking)
1632 btrfs_tree_read_unlock(eb);
1633 free_extent_buffer(eb);
1634 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1637 ref->inode_list = eie;
1639 * We transferred the list ownership to the ref,
1640 * so set to NULL to avoid a double free in case
1641 * an error happens after this.
1645 ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1647 (void **)&eie, GFP_NOFS);
1650 if (!ret && !ctx->ignore_extent_item_pos) {
1652 * We've recorded that parent, so we must extend
1653 * its inode list here.
1655 * However if there was corruption we may not
1656 * have found an eie, return an error in this
1666 eie->next = ref->inode_list;
1670 * We have transferred the inode list ownership from
1671 * this ref to the ref we added to the 'refs' ulist.
1672 * So set this ref's inode list to NULL to avoid
1673 * use-after-free when our caller uses it or double
1674 * frees in case an error happens before we return.
1676 ref->inode_list = NULL;
1682 btrfs_free_path(path);
1684 prelim_release(&preftrees.direct);
1685 prelim_release(&preftrees.indirect);
1686 prelim_release(&preftrees.indirect_missing_keys);
1688 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
1689 free_inode_elem_list(eie);
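/*
 * Illustrative call flow (sketch) of the resolution pipeline implemented
 * above for a single bytenr:
 *
 *	find_parent_nodes()
 *	    add_delayed_refs()       pending delayed ref modifications
 *	    add_inline_refs()        inline refs in the extent item
 *	    add_keyed_refs()         separate keyed backref items
 *	    add_missing_keys()       read tree blocks to fill missing keys
 *	    resolve_indirect_refs()  (root, key, level) -> parent bytenr
 *	    walk preftrees.direct    emit roots and parents to the ulists
 */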
1694 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1701 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1703 int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
1707 ASSERT(ctx->refs == NULL);
1709 ctx->refs = ulist_alloc(GFP_NOFS);
1713 ret = find_parent_nodes(ctx, NULL);
1714 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1715 (ret < 0 && ret != -ENOENT)) {
1716 free_leaf_list(ctx->refs);
1725 * Walk all backrefs for a given extent to find all roots that reference this
1726 * extent. Walking a backref means finding all extents that reference this
1727 * extent and in turn walk the backrefs of those, too. Naturally this is a
1728 * recursive process, but here it is implemented in an iterative fashion: We
1729 * find all referencing extents for the extent in question and put them on a
1730 * list. In turn, we find all referencing extents for those, further appending
1731 * to the list. The way we iterate the list allows adding more elements after
1732 * the current while iterating. The process stops when we reach the end of the
1735 * Found roots are added to @ctx->roots, which is allocated by this function if
1736 * it points to NULL, in which case the caller is responsible for freeing it
1737 * after it's not needed anymore.
1738 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1739 * ulist to do temporary work, and frees it before returning.
1741 * Returns 0 on success, < 0 on error.
1743 static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1745 const u64 orig_bytenr = ctx->bytenr;
1746 const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos;
1747 bool roots_ulist_allocated = false;
1748 struct ulist_iterator uiter;
1751 ASSERT(ctx->refs == NULL);
1753 ctx->refs = ulist_alloc(GFP_NOFS);
1758 ctx->roots = ulist_alloc(GFP_NOFS);
1760 ulist_free(ctx->refs);
1764 roots_ulist_allocated = true;
1767 ctx->ignore_extent_item_pos = true;
1769 ULIST_ITER_INIT(&uiter);
1771 struct ulist_node *node;
1773 ret = find_parent_nodes(ctx, NULL);
1774 if (ret < 0 && ret != -ENOENT) {
1775 if (roots_ulist_allocated) {
1776 ulist_free(ctx->roots);
1782 node = ulist_next(ctx->refs, &uiter);
1785 ctx->bytenr = node->val;
1789 ulist_free(ctx->refs);
1791 ctx->bytenr = orig_bytenr;
1792 ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos;
1797 int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1798 bool skip_commit_root_sem)
1802 if (!ctx->trans && !skip_commit_root_sem)
1803 down_read(&ctx->fs_info->commit_root_sem);
1804 ret = btrfs_find_all_roots_safe(ctx);
1805 if (!ctx->trans && !skip_commit_root_sem)
1806 up_read(&ctx->fs_info->commit_root_sem);
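/*
 * Usage sketch (hypothetical caller, e.g. qgroup-style accounting) for
 * btrfs_find_all_roots(); the walk context fields are the ones this file
 * already relies on:
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = bytenr;
 *	ret = btrfs_find_all_roots(&walk_ctx, false);
 *	if (ret < 0)
 *		return ret;
 *	(iterate walk_ctx.roots with ULIST_ITER_INIT()/ulist_next(),
 *	 then release it with ulist_free(walk_ctx.roots))
 */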
1810 struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1812 struct btrfs_backref_share_check_ctx *ctx;
1814 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1818 ulist_init(&ctx->refs);
1823 void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1828 ulist_release(&ctx->refs);
1833 * Check if a data extent is shared or not.
1835 * @inode: The inode whose extent we are checking.
1836 * @bytenr: Logical bytenr of the extent we are checking.
1837 * @extent_gen: Generation of the extent (file extent item) or 0 if it is
1839 * @ctx: A backref sharedness check context.
1841 * btrfs_is_data_extent_shared uses the backref walking code but will short
1842 * circuit as soon as it finds a root or inode that doesn't match the
1843 * one passed in. This provides a significant performance benefit for
1844 * callers (such as fiemap) which want to know whether the extent is
1845 * shared but do not need a ref count.
1847 * This attempts to attach to the running transaction in order to account for
1848 * delayed refs, but continues on even when no running transaction exists.
1850 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1852 int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1854 struct btrfs_backref_share_check_ctx *ctx)
1856 struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1857 struct btrfs_root *root = inode->root;
1858 struct btrfs_fs_info *fs_info = root->fs_info;
1859 struct btrfs_trans_handle *trans;
1860 struct ulist_iterator uiter;
1861 struct ulist_node *node;
1862 struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1864 struct share_check shared = {
1867 .inum = btrfs_ino(inode),
1868 .data_bytenr = bytenr,
1869 .data_extent_gen = extent_gen,
1871 .self_ref_count = 0,
1872 .have_delayed_delete_refs = false,
1876 bool leaf_is_shared;
1878 for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1879 if (ctx->prev_extents_cache[i].bytenr == bytenr)
1880 return ctx->prev_extents_cache[i].is_shared;
1883 ulist_init(&ctx->refs);
1885 trans = btrfs_join_transaction_nostart(root);
1886 if (IS_ERR(trans)) {
1887 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1888 ret = PTR_ERR(trans);
1892 down_read(&fs_info->commit_root_sem);
1894 btrfs_get_tree_mod_seq(fs_info, &elem);
1895 walk_ctx.time_seq = elem.seq;
1898 ctx->use_path_cache = true;
1901 * We may have previously determined that the current leaf is shared.
1902 * If it is, then we have a data extent that is shared due to a shared
1903 * subtree (caused by snapshotting) and we don't need to check for data
1904 * backrefs. If the leaf is not shared, then we must do backref walking
1905 * to determine if the data extent is shared through reflinks.
1907 leaf_cached = lookup_backref_shared_cache(ctx, root,
1908 ctx->curr_leaf_bytenr, 0,
1910 if (leaf_cached && leaf_is_shared) {
1915 walk_ctx.ignore_extent_item_pos = true;
1916 walk_ctx.trans = trans;
1917 walk_ctx.fs_info = fs_info;
1918 walk_ctx.refs = &ctx->refs;
1920 /* -1 means we are in the bytenr of the data extent. */
1922 ULIST_ITER_INIT(&uiter);
1927 walk_ctx.bytenr = bytenr;
1928 ret = find_parent_nodes(&walk_ctx, &shared);
1929 if (ret == BACKREF_FOUND_SHARED ||
1930 ret == BACKREF_FOUND_NOT_SHARED) {
1931 /* If shared must return 1, otherwise return 0. */
1932 ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1934 store_backref_shared_cache(ctx, root, bytenr,
1938 if (ret < 0 && ret != -ENOENT)
 * If our data extent was not directly shared (without multiple
 * reference items), then it might have a single reference item
1945 * with a count > 1 for the same offset, which means there are 2
1946 * (or more) file extent items that point to the data extent -
1947 * this happens when a file extent item needs to be split and
1948 * then one item gets moved to another leaf due to a b+tree leaf
1949 * split when inserting some item. In this case the file extent
1950 * items may be located in different leaves and therefore some
1951 * of the leaves may be referenced through shared subtrees while
1952 * others are not. Since our extent buffer cache only works for
1953 * a single path (by far the most common case and simpler to
1954 * deal with), we can not use it if we have multiple leaves
1955 * (which implies multiple paths).
1957 if (level == -1 && ctx->refs.nnodes > 1)
1958 ctx->use_path_cache = false;
1961 store_backref_shared_cache(ctx, root, bytenr,
1963 node = ulist_next(&ctx->refs, &uiter);
1968 cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
1971 ret = (is_shared ? 1 : 0);
1974 shared.share_count = 0;
1975 shared.have_delayed_delete_refs = false;
1980 * Cache the sharedness result for the data extent if we know our inode
1981 * has more than 1 file extent item that refers to the data extent.
1983 if (ret >= 0 && shared.self_ref_count > 1) {
1984 int slot = ctx->prev_extents_cache_slot;
1986 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
1987 ctx->prev_extents_cache[slot].is_shared = (ret == 1);
1989 slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
1990 ctx->prev_extents_cache_slot = slot;
1995 btrfs_put_tree_mod_seq(fs_info, &elem);
1996 btrfs_end_transaction(trans);
1998 up_read(&fs_info->commit_root_sem);
2001 ulist_release(&ctx->refs);
2002 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
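/*
 * Usage sketch (hypothetical fiemap-like caller) for the function above,
 * using the context allocation helpers defined earlier in this file:
 *
 *	struct btrfs_backref_share_check_ctx *sc_ctx;
 *	int ret;
 *
 *	sc_ctx = btrfs_alloc_backref_share_check_ctx();
 *	if (!sc_ctx)
 *		return -ENOMEM;
 *	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen,
 *					  sc_ctx);
 *	btrfs_free_backref_share_ctx(sc_ctx);
 *	if (ret < 0)
 *		return ret;
 *	(ret == 1 means shared, ret == 0 means not shared)
 */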
2007 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2008 u64 start_off, struct btrfs_path *path,
2009 struct btrfs_inode_extref **ret_extref,
2013 struct btrfs_key key;
2014 struct btrfs_key found_key;
2015 struct btrfs_inode_extref *extref;
2016 const struct extent_buffer *leaf;
2019 key.objectid = inode_objectid;
2020 key.type = BTRFS_INODE_EXTREF_KEY;
2021 key.offset = start_off;
2023 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2028 leaf = path->nodes[0];
2029 slot = path->slots[0];
2030 if (slot >= btrfs_header_nritems(leaf)) {
2032 * If the item at offset is not found,
2033 * btrfs_search_slot will point us to the slot
2034 * where it should be inserted. In our case
2035 * that will be the slot directly before the
 * next INODE_EXTREF_KEY item. In the case
2037 * that we're pointing to the last slot in a
2038 * leaf, we must move one leaf over.
2040 ret = btrfs_next_leaf(root, path);
2049 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2052 * Check that we're still looking at an extended ref key for
 * this particular objectid. If we have a different
 * objectid or type then there are no more to be found
2055 * in the tree and we can exit.
2058 if (found_key.objectid != inode_objectid)
2060 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2064 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2065 extref = (struct btrfs_inode_extref *)ptr;
2066 *ret_extref = extref;
2068 *found_off = found_key.offset;
 * This iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible for
 * providing "size" bytes in "dest". The dest buffer will be filled backwards.
 * Finally, the start point of the resulting string is returned. This pointer
 * is within dest. In case the path buffer would overflow, the pointer is
 * decremented further as if output was written to the buffer, though no more
 * output is actually generated. That way, the caller can determine how much
 * space would be required for the path to fit into the buffer. In that case,
 * the returned value will be smaller than dest. Callers must check this!
2089 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2090 u32 name_len, unsigned long name_off,
2091 struct extent_buffer *eb_in, u64 parent,
2092 char *dest, u32 size)
2097 s64 bytes_left = ((s64)size) - 1;
2098 struct extent_buffer *eb = eb_in;
2099 struct btrfs_key found_key;
2100 struct btrfs_inode_ref *iref;
2102 if (bytes_left >= 0)
2103 dest[bytes_left] = '\0';
2106 bytes_left -= name_len;
2107 if (bytes_left >= 0)
2108 read_extent_buffer(eb, dest + bytes_left,
2109 name_off, name_len);
2111 if (!path->skip_locking)
2112 btrfs_tree_read_unlock(eb);
2113 free_extent_buffer(eb);
2115 ret = btrfs_find_item(fs_root, path, parent, 0,
2116 BTRFS_INODE_REF_KEY, &found_key);
2122 next_inum = found_key.offset;
2124 /* regular exit ahead */
2125 if (parent == next_inum)
2128 slot = path->slots[0];
2129 eb = path->nodes[0];
2130 /* make sure we can use eb after releasing the path */
2132 path->nodes[0] = NULL;
2135 btrfs_release_path(path);
2136 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2138 name_len = btrfs_inode_ref_name_len(eb, iref);
2139 name_off = (unsigned long)(iref + 1);
2143 if (bytes_left >= 0)
2144 dest[bytes_left] = '/';
2147 btrfs_release_path(path);
2150 return ERR_PTR(ret);
2152 return dest + bytes_left;
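/*
 * Usage sketch (hypothetical caller; buf and buf_size stand for a
 * caller-provided buffer): since the buffer is filled from the end, the
 * caller uses the returned pointer and must detect overflow by checking
 * whether it still lies within the buffer:
 *
 *	name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				 eb_in, parent, buf, buf_size);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	if (name < buf)
 *		return -ENAMETOOLONG;	(buffer was too small, see above)
 */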
 * This makes the path point to (logical EXTENT_ITEM *).
 * Returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
2160 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2161 struct btrfs_path *path, struct btrfs_key *found_key,
2164 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2169 const struct extent_buffer *eb;
2170 struct btrfs_extent_item *ei;
2171 struct btrfs_key key;
2173 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2174 key.type = BTRFS_METADATA_ITEM_KEY;
2176 key.type = BTRFS_EXTENT_ITEM_KEY;
2177 key.objectid = logical;
2178 key.offset = (u64)-1;
2180 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2184 ret = btrfs_previous_extent_item(extent_root, path, 0);
2190 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2191 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2192 size = fs_info->nodesize;
2193 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2194 size = found_key->offset;
2196 if (found_key->objectid > logical ||
2197 found_key->objectid + size <= logical) {
2198 btrfs_debug(fs_info,
2199 "logical %llu is not within any extent", logical);
2203 eb = path->nodes[0];
2204 item_size = btrfs_item_size(eb, path->slots[0]);
2205 BUG_ON(item_size < sizeof(*ei));
2207 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2208 flags = btrfs_extent_flags(eb, ei);
2210 btrfs_debug(fs_info,
2211 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2212 logical, logical - found_key->objectid, found_key->objectid,
2213 found_key->offset, flags, item_size);
2215 WARN_ON(!flags_ret);
2217 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2218 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2219 else if (flags & BTRFS_EXTENT_FLAG_DATA)
2220 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
 * Helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. It is used to track state.
 * If more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. After the last ref was processed, 1 is returned.
 * Returns <0 on error.
2237 static int get_extent_inline_ref(unsigned long *ptr,
2238 const struct extent_buffer *eb,
2239 const struct btrfs_key *key,
2240 const struct btrfs_extent_item *ei,
2242 struct btrfs_extent_inline_ref **out_eiref,
2247 struct btrfs_tree_block_info *info;
2249 if (!*ptr) {
2250 /* first call */
2251 flags = btrfs_extent_flags(eb, ei);
2252 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2253 if (key->type == BTRFS_METADATA_ITEM_KEY) {
2254 /* a skinny metadata extent */
2255 *out_eiref =
2256 (struct btrfs_extent_inline_ref *)(ei + 1);
2257 } else {
2258 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2259 info = (struct btrfs_tree_block_info *)(ei + 1);
2260 *out_eiref =
2261 (struct btrfs_extent_inline_ref *)(info + 1);
2262 }
2263 } else {
2264 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2265 }
2266 *ptr = (unsigned long)*out_eiref;
2267 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2268 return -ENOENT;
2269 }
2271 end = (unsigned long)ei + item_size;
2272 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2273 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2274 BTRFS_REF_TYPE_ANY);
2275 if (*out_type == BTRFS_REF_TYPE_INVALID)
2276 return -EUCLEAN;
2278 *ptr += btrfs_extent_inline_ref_size(*out_type);
2279 WARN_ON(*ptr > end);
2280 if (*ptr == end)
2281 return 1; /* last */
2283 return 0;
2284 }
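/*
 * Editor's illustrative sketch, not in the original file: the ptr protocol
 * documented above reduces to the loop below.  A 0 cookie starts the walk,
 * a 0 return means more refs follow, and 1 means the ref just returned was
 * the last one.  The helper name is made up.
 */
static int __maybe_unused demo_walk_inline_refs(const struct extent_buffer *eb,
						const struct btrfs_key *key,
						const struct btrfs_extent_item *ei,
						u32 item_size)
{
	struct btrfs_extent_inline_ref *eiref;
	unsigned long ptr = 0;	/* must be 0 on the first call */
	int type;
	int ret;

	do {
		ret = get_extent_inline_ref(&ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;
		/* eiref and type now describe one inline backref */
	} while (ret == 0);

	return 0;
}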
2287 * reads the tree block backref for an extent. tree level and root are returned
2288 * through out_level and out_root. ptr must point to a 0 value for the first
2289 * call and may be modified (see get_extent_inline_ref comment).
2290 * returns 0 if data was provided, 1 if there was no more data to provide or
2291 * <0 on error.
2293 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2294 struct btrfs_key *key, struct btrfs_extent_item *ei,
2295 u32 item_size, u64 *out_root, u8 *out_level)
2296 {
2297 int ret;
2298 int type;
2299 struct btrfs_extent_inline_ref *eiref;
2301 if (*ptr == (unsigned long)-1)
2302 return 1;
2304 while (1) {
2305 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2306 &eiref, &type);
2307 if (ret < 0)
2308 return ret;
2310 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2311 type == BTRFS_SHARED_BLOCK_REF_KEY)
2312 break;
2314 if (ret == 1)
2315 return 1;
2316 }
2318 /* we can treat both ref types equally here */
2319 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2321 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2322 struct btrfs_tree_block_info *info;
2324 info = (struct btrfs_tree_block_info *)(ei + 1);
2325 *out_level = btrfs_tree_block_level(eb, info);
2326 } else {
2327 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2328 *out_level = (u8)key->offset;
2329 }
2331 if (ret == 1)
2332 *ptr = (unsigned long)-1;
2334 return 0;
2335 }
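/*
 * Editor's illustrative sketch, not from the original source: callers such
 * as the scrub code drain all tree backrefs of a metadata extent with a
 * loop of this shape, stopping on 1 (done) or <0 (error).
 */
static void __maybe_unused demo_walk_tree_backrefs(struct extent_buffer *eb,
						   struct btrfs_key *key,
						   struct btrfs_extent_item *ei,
						   u32 item_size)
{
	unsigned long ptr = 0;	/* state cookie, must start at 0 */
	u64 root;
	u8 level;

	while (tree_backref_for_extent(&ptr, eb, key, ei, item_size,
				       &root, &level) == 0) {
		/* one (root, level) pair is produced per iteration */
		pr_debug("extent referenced from root %llu level %d\n",
			 root, level);
	}
}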
2337 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2338 struct extent_inode_elem *inode_list,
2339 u64 root, u64 extent_item_objectid,
2340 iterate_extent_inodes_t *iterate, void *ctx)
2341 {
2342 struct extent_inode_elem *eie;
2343 int ret = 0;
2345 for (eie = inode_list; eie; eie = eie->next) {
2346 btrfs_debug(fs_info,
2347 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
2348 extent_item_objectid, eie->inum,
2350 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2351 if (ret) {
2352 btrfs_debug(fs_info,
2353 "stopping iteration for %llu due to ret=%d",
2354 extent_item_objectid, ret);
2355 break;
2356 }
2357 }
2359 return ret;
2360 }
2363 * calls iterate() for every inode that references the extent identified by
2364 * the given parameters.
2365 * when the iterator function returns a non-zero value, iteration stops.
2367 int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2368 bool search_commit_root,
2369 iterate_extent_inodes_t *iterate, void *user_ctx)
2370 {
2371 int ret;
2372 struct ulist *refs;
2373 struct ulist_node *ref_node;
2374 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2375 struct ulist_iterator ref_uiter;
2377 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2378 ctx->bytenr);
2380 ASSERT(ctx->trans == NULL);
2381 ASSERT(ctx->roots == NULL);
2383 if (!search_commit_root) {
2384 struct btrfs_trans_handle *trans;
2386 trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2387 if (IS_ERR(trans)) {
2388 if (PTR_ERR(trans) != -ENOENT &&
2389 PTR_ERR(trans) != -EROFS)
2390 return PTR_ERR(trans);
2397 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2398 ctx->time_seq = seq_elem.seq;
2400 down_read(&ctx->fs_info->commit_root_sem);
2403 ret = btrfs_find_all_leafs(ctx);
2404 if (ret)
2405 goto out;
2406 refs = ctx->refs;
2407 ctx->refs = NULL;
2409 ULIST_ITER_INIT(&ref_uiter);
2410 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2411 const u64 leaf_bytenr = ref_node->val;
2412 struct ulist_node *root_node;
2413 struct ulist_iterator root_uiter;
2414 struct extent_inode_elem *inode_list;
2416 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2418 if (ctx->cache_lookup) {
2419 const u64 *root_ids;
2423 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2424 &root_ids, &root_count);
2426 for (int i = 0; i < root_count; i++) {
2427 ret = iterate_leaf_refs(ctx->fs_info,
2428 inode_list, root_ids[i],
2429 leaf_bytenr, iterate,
2430 user_ctx);
2431 if (ret)
2432 break;
2433 }
2434 continue;
2435 }
2436 }
2440 if (!ctx->roots) {
2441 ctx->roots = ulist_alloc(GFP_NOFS);
2442 if (!ctx->roots) {
2443 ret = -ENOMEM;
2444 break;
2445 }
2446 }
2448 ctx->bytenr = leaf_bytenr;
2449 ret = btrfs_find_all_roots_safe(ctx);
2450 if (ret)
2451 break;
2453 if (ctx->cache_store)
2454 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2456 ULIST_ITER_INIT(&root_uiter);
2457 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2458 btrfs_debug(ctx->fs_info,
2459 "root %llu references leaf %llu, data list %#llx",
2460 root_node->val, ref_node->val,
2462 ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2463 root_node->val, ctx->bytenr,
2464 iterate, user_ctx);
2465 }
2466 ulist_reinit(ctx->roots);
2467 }
2469 free_leaf_list(refs);
2470 out:
2471 if (ctx->trans) {
2472 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2473 btrfs_end_transaction(ctx->trans);
2474 ctx->trans = NULL;
2475 } else {
2476 up_read(&ctx->fs_info->commit_root_sem);
2477 }
2479 ulist_free(ctx->roots);
2480 ctx->roots = NULL;
2482 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2483 ret = 0;
2485 return ret;
2486 }
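/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * iterator callback plus the walk-context setup that
 * iterate_extent_inodes() expects.  Both helper names are made up.
 */
static int demo_print_inode(u64 inum, u64 offset, u64 num_bytes, u64 root,
			    void *ctx)
{
	pr_debug("extent used by ino %llu offset %llu root %llu\n",
		 inum, offset, root);
	return 0;	/* returning non-zero stops the iteration */
}

static int __maybe_unused demo_iterate(struct btrfs_fs_info *fs_info,
				       u64 extent_bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };

	walk_ctx.bytenr = extent_bytenr;
	walk_ctx.fs_info = fs_info;
	walk_ctx.ignore_extent_item_pos = true;

	return iterate_extent_inodes(&walk_ctx, true /* commit root */,
				     demo_print_inode, NULL);
}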
2488 static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2490 struct btrfs_data_container *inodes = ctx;
2491 const size_t c = 3 * sizeof(u64);
2493 if (inodes->bytes_left >= c) {
2494 inodes->bytes_left -= c;
2495 inodes->val[inodes->elem_cnt] = inum;
2496 inodes->val[inodes->elem_cnt + 1] = offset;
2497 inodes->val[inodes->elem_cnt + 2] = root;
2498 inodes->elem_cnt += 3;
2499 } else {
2500 inodes->bytes_missing += c - inodes->bytes_left;
2501 inodes->bytes_left = 0;
2502 inodes->elem_missed += 3;
2503 }
2505 return 0;
2506 }
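/*
 * Editor's note, not in the original file: build_ino_list() packs
 * (inum, offset, root) triples into the flat val[] array of struct
 * btrfs_data_container, so LOGICAL_INO ioctl consumers are expected to
 * read it back as:
 *
 *	for (i = 0; i < inodes->elem_cnt; i += 3) {
 *		u64 inum   = inodes->val[i];
 *		u64 offset = inodes->val[i + 1];
 *		u64 root   = inodes->val[i + 2];
 *	}
 */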
2508 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2509 struct btrfs_path *path,
2510 void *ctx, bool ignore_offset)
2511 {
2512 struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2513 int ret;
2514 u64 flags = 0;
2515 struct btrfs_key found_key;
2516 int search_commit_root = path->search_commit_root;
2518 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2519 btrfs_release_path(path);
2520 if (ret < 0)
2521 return ret;
2522 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2523 return -EINVAL;
2525 walk_ctx.bytenr = found_key.objectid;
2526 if (ignore_offset)
2527 walk_ctx.ignore_extent_item_pos = true;
2528 else
2529 walk_ctx.extent_item_pos = logical - found_key.objectid;
2530 walk_ctx.fs_info = fs_info;
2532 return iterate_extent_inodes(&walk_ctx, search_commit_root,
2533 build_ino_list, ctx);
2536 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2537 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2539 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2540 {
2541 int ret = 0;
2542 int slot;
2543 u32 cur;
2544 u32 len;
2545 u32 name_len;
2546 u64 parent = 0;
2547 int found = 0;
2548 struct btrfs_root *fs_root = ipath->fs_root;
2549 struct btrfs_path *path = ipath->btrfs_path;
2550 struct extent_buffer *eb;
2551 struct btrfs_inode_ref *iref;
2552 struct btrfs_key found_key;
2554 while (!ret) {
2555 ret = btrfs_find_item(fs_root, path, inum,
2556 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2557 &found_key);
2558 if (ret < 0)
2559 break;
2560 if (ret) {
2561 ret = found ? 0 : -ENOENT;
2562 break;
2563 }
2564 ++found;
2567 parent = found_key.offset;
2568 slot = path->slots[0];
2569 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2570 if (!eb) {
2571 ret = -ENOMEM;
2572 break;
2573 }
2574 btrfs_release_path(path);
2576 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2578 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2579 name_len = btrfs_inode_ref_name_len(eb, iref);
2580 /* path must be released before calling iterate()! */
2581 btrfs_debug(fs_root->fs_info,
2582 "following ref at offset %u for inode %llu in tree %llu",
2583 cur, found_key.objectid,
2584 fs_root->root_key.objectid);
2585 ret = inode_to_path(parent, name_len,
2586 (unsigned long)(iref + 1), eb, ipath);
2587 if (ret)
2588 break;
2589 len = sizeof(*iref) + name_len;
2590 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2591 }
2592 free_extent_buffer(eb);
2593 }
2595 btrfs_release_path(path);
2597 return ret;
2598 }
2600 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2601 {
2602 int ret;
2603 int slot;
2604 u64 offset = 0;
2605 u64 parent;
2606 int found = 0;
2607 struct btrfs_root *fs_root = ipath->fs_root;
2608 struct btrfs_path *path = ipath->btrfs_path;
2609 struct extent_buffer *eb;
2610 struct btrfs_inode_extref *extref;
2611 u32 item_size;
2612 u32 cur_offset;
2613 unsigned long ptr;
2615 while (1) {
2616 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2617 &offset);
2618 if (ret < 0)
2619 break;
2620 if (ret) {
2621 ret = found ? 0 : -ENOENT;
2622 break;
2623 }
2624 ++found;
2626 slot = path->slots[0];
2627 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2628 if (!eb) {
2629 ret = -ENOMEM;
2630 break;
2631 }
2632 btrfs_release_path(path);
2634 item_size = btrfs_item_size(eb, slot);
2635 ptr = btrfs_item_ptr_offset(eb, slot);
2637 cur_offset = 0;
2638 while (cur_offset < item_size) {
2639 u32 name_len;
2641 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2642 parent = btrfs_inode_extref_parent(eb, extref);
2643 name_len = btrfs_inode_extref_name_len(eb, extref);
2644 ret = inode_to_path(parent, name_len,
2645 (unsigned long)&extref->name, eb, ipath);
2646 if (ret)
2647 break;
2649 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2650 cur_offset += sizeof(*extref);
2651 }
2652 free_extent_buffer(eb);
2654 offset++;
2655 }
2657 btrfs_release_path(path);
2659 return ret;
2660 }
2663 * returns 0 if the path could be dumped (probably truncated)
2664 * returns <0 in case of an error
2666 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2667 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2668 {
2669 char *fspath;
2670 char *fspath_min;
2671 int i = ipath->fspath->elem_cnt;
2672 const int s_ptr = sizeof(char *);
2673 u32 bytes_left;
2675 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2676 ipath->fspath->bytes_left - s_ptr : 0;
2678 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2679 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2680 name_off, eb, inum, fspath_min, bytes_left);
2681 if (IS_ERR(fspath))
2682 return PTR_ERR(fspath);
2684 if (fspath > fspath_min) {
2685 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2686 ++ipath->fspath->elem_cnt;
2687 ipath->fspath->bytes_left = fspath - fspath_min;
2688 } else {
2689 ++ipath->fspath->elem_missed;
2690 ipath->fspath->bytes_missing += fspath_min - fspath;
2691 ipath->fspath->bytes_left = 0;
2692 }
2694 return 0;
2695 }
2698 * this dumps all file system paths to the inode into the ipath struct, provided
2699 * it has been created large enough. each path is zero-terminated and accessed
2700 * from ipath->fspath->val[i].
2701 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2702 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2703 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2704 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2705 * have been needed to return all paths.
2707 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2708 {
2709 int ret;
2710 int found_refs = 0;
2712 ret = iterate_inode_refs(inum, ipath);
2713 if (!ret)
2714 ++found_refs;
2715 else if (ret != -ENOENT)
2716 return ret;
2718 ret = iterate_inode_extrefs(inum, ipath);
2719 if (ret == -ENOENT && found_refs)
2720 return 0;
2722 return ret;
2723 }
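/*
 * Editor's illustrative sketch, not part of the original file: after a
 * successful paths_from_inode() call the results are consumed like this;
 * each val[] entry is a pointer (cast to u64) to a NUL-terminated string.
 */
static void __maybe_unused demo_print_ipath(struct inode_fs_paths *ipath)
{
	u32 i;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		pr_debug("path %u: %s\n", i,
			 (char *)(unsigned long)ipath->fspath->val[i]);
	if (ipath->fspath->elem_missed)
		pr_debug("%u paths did not fit\n",
			 ipath->fspath->elem_missed);
}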
2725 struct btrfs_data_container *init_data_container(u32 total_bytes)
2726 {
2727 struct btrfs_data_container *data;
2728 size_t alloc_bytes;
2730 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2731 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2732 if (!data)
2733 return ERR_PTR(-ENOMEM);
2735 if (total_bytes >= sizeof(*data)) {
2736 data->bytes_left = total_bytes - sizeof(*data);
2737 data->bytes_missing = 0;
2738 } else {
2739 data->bytes_missing = sizeof(*data) - total_bytes;
2740 data->bytes_left = 0;
2741 }
2743 data->elem_cnt = 0;
2744 data->elem_missed = 0;
2746 return data;
2747 }
2750 * allocates space to return multiple file system paths for an inode.
2751 * the number of bytes to allocate is passed in total_bytes; note that the
2752 * space usable for actual path information is total_bytes -
2753 * sizeof(struct btrfs_data_container). the returned pointer must be freed
2754 * with free_ipath() in the end.
2755 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2756 struct btrfs_path *path)
2758 struct inode_fs_paths *ifp;
2759 struct btrfs_data_container *fspath;
2761 fspath = init_data_container(total_bytes);
2762 if (IS_ERR(fspath))
2763 return ERR_CAST(fspath);
2765 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2766 if (!ifp) {
2767 kvfree(fspath);
2768 return ERR_PTR(-ENOMEM);
2769 }
2771 ifp->btrfs_path = path;
2772 ifp->fspath = fspath;
2773 ifp->fs_root = fs_root;
2775 return ifp;
2776 }
2778 void free_ipath(struct inode_fs_paths *ipath)
2779 {
2780 if (!ipath)
2781 return;
2782 kvfree(ipath->fspath);
2783 kfree(ipath);
2784 }
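/*
 * Editor's illustrative sketch, not in the original file: the expected
 * init_ipath()/free_ipath() lifecycle around paths_from_inode().  The
 * 4096-byte container size is arbitrary.
 */
static int __maybe_unused demo_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	/* consume ipath->fspath->val[0 .. elem_cnt - 1] here */

	free_ipath(ipath);	/* frees the container, not the path */
	btrfs_free_path(path);
	return ret;
}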
2786 struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2788 struct btrfs_backref_iter *ret;
2790 ret = kzalloc(sizeof(*ret), GFP_NOFS);
2791 if (!ret)
2792 return NULL;
2794 ret->path = btrfs_alloc_path();
2795 if (!ret->path) {
2796 kfree(ret);
2797 return NULL;
2798 }
2800 /* Current backref iterator only supports iteration in commit root */
2801 ret->path->search_commit_root = 1;
2802 ret->path->skip_locking = 1;
2803 ret->fs_info = fs_info;
2805 return ret;
2806 }
2808 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2810 struct btrfs_fs_info *fs_info = iter->fs_info;
2811 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2812 struct btrfs_path *path = iter->path;
2813 struct btrfs_extent_item *ei;
2814 struct btrfs_key key;
2815 int ret;
2817 key.objectid = bytenr;
2818 key.type = BTRFS_METADATA_ITEM_KEY;
2819 key.offset = (u64)-1;
2820 iter->bytenr = bytenr;
2822 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2823 if (ret < 0)
2824 return ret;
2829 if (path->slots[0] == 0) {
2830 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2831 ret = -EUCLEAN;
2832 goto release;
2833 }
2834 path->slots[0]--;
2836 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2837 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2838 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2839 ret = -ENOENT;
2840 goto release;
2841 }
2842 memcpy(&iter->cur_key, &key, sizeof(key));
2843 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2844 path->slots[0]);
2845 iter->end_ptr = (u32)(iter->item_ptr +
2846 btrfs_item_size(path->nodes[0], path->slots[0]));
2847 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2848 struct btrfs_extent_item);
2851 * Only iteration over tree backrefs is supported yet.
2853 * This is an extra precaution for non skinny-metadata, where
2854 * EXTENT_ITEM is also used for tree blocks; there we can only use the
2855 * extent flags to determine if it's a tree block.
2857 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2858 ret = -ENOTSUPP;
2859 goto release;
2860 }
2861 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2863 /* If there is no inline backref, go search for keyed backref */
2864 if (iter->cur_ptr >= iter->end_ptr) {
2865 ret = btrfs_next_item(extent_root, path);
2867 /* No inline nor keyed ref */
2868 if (ret > 0) {
2869 ret = -ENOENT;
2870 goto release;
2871 }
2872 if (ret < 0)
2873 goto release;
2875 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2876 path->slots[0]);
2877 if (iter->cur_key.objectid != bytenr ||
2878 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2879 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2880 ret = -ENOENT;
2881 goto release;
2882 }
2883 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2884 path->slots[0]);
2885 iter->item_ptr = iter->cur_ptr;
2886 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2887 path->nodes[0], path->slots[0]));
2888 }
2889 return 0;
2891 release:
2892 btrfs_backref_iter_release(iter);
2893 return ret;
2894 }
2897 * Go to the next backref item of the current bytenr, which can be either
2898 * inlined or keyed.
2900 * Caller needs to check whether it's an inline ref or a keyed ref by
2901 * inspecting iter->cur_key.
2902 * Return 0 if we got the next backref without problem.
2903 * Return >0 if there is no extra backref for this bytenr.
2904 * Return <0 if something went wrong.
2905 */
2906 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2908 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2909 struct btrfs_root *extent_root;
2910 struct btrfs_path *path = iter->path;
2911 struct btrfs_extent_inline_ref *iref;
2912 int ret;
2913 u32 size;
2915 if (btrfs_backref_iter_is_inline_ref(iter)) {
2916 /* We're still inside the inline refs */
2917 ASSERT(iter->cur_ptr < iter->end_ptr);
2919 if (btrfs_backref_has_tree_block_info(iter)) {
2920 /* First tree block info */
2921 size = sizeof(struct btrfs_tree_block_info);
2922 } else {
2923 /* Use inline ref type to determine the size */
2924 int type;
2926 iref = (struct btrfs_extent_inline_ref *)
2927 ((unsigned long)iter->cur_ptr);
2928 type = btrfs_extent_inline_ref_type(eb, iref);
2929 if (type == BTRFS_REF_TYPE_INVALID)
2930 return -EUCLEAN;
2931 size = btrfs_extent_inline_ref_size(type);
2932 }
2933 iter->cur_ptr += size;
2934 if (iter->cur_ptr < iter->end_ptr)
2935 return 0;
2936 /* All inline items iterated, fall through */
2937 }
2939 /* We're at keyed items, there is no inline item, go to the next one */
2940 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2941 ret = btrfs_next_item(extent_root, iter->path);
2942 if (ret)
2943 return ret;
2945 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2946 if (iter->cur_key.objectid != iter->bytenr ||
2947 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2948 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2949 return 1;
2950 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2951 path->slots[0]);
2952 iter->cur_ptr = iter->item_ptr;
2953 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2954 path->slots[0]);
2956 return 0;
2957 }
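/*
 * Editor's illustrative sketch, not part of the original file: the intended
 * alloc/start/next/free cycle of the backref iterator.  After a successful
 * start the first backref is already current; inline vs keyed refs are told
 * apart via iter->cur_key as noted above.
 */
static int __maybe_unused demo_iterate_backrefs(struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	if (ret < 0)
		goto out;

	do {
		pr_debug("backref key type %u offset %llu\n",
			 iter->cur_key.type, iter->cur_key.offset);
		ret = btrfs_backref_iter_next(iter);
	} while (ret == 0);

	if (ret > 0)	/* no more backrefs */
		ret = 0;
out:
	btrfs_backref_iter_free(iter);
	return ret;
}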
2958 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2959 struct btrfs_backref_cache *cache, int is_reloc)
2960 {
2961 int i;
2963 cache->rb_root = RB_ROOT;
2964 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2965 INIT_LIST_HEAD(&cache->pending[i]);
2966 INIT_LIST_HEAD(&cache->changed);
2967 INIT_LIST_HEAD(&cache->detached);
2968 INIT_LIST_HEAD(&cache->leaves);
2969 INIT_LIST_HEAD(&cache->pending_edge);
2970 INIT_LIST_HEAD(&cache->useless_node);
2971 cache->fs_info = fs_info;
2972 cache->is_reloc = is_reloc;
2975 struct btrfs_backref_node *btrfs_backref_alloc_node(
2976 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2978 struct btrfs_backref_node *node;
2980 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2981 node = kzalloc(sizeof(*node), GFP_NOFS);
2982 if (!node)
2983 return node;
2985 INIT_LIST_HEAD(&node->list);
2986 INIT_LIST_HEAD(&node->upper);
2987 INIT_LIST_HEAD(&node->lower);
2988 RB_CLEAR_NODE(&node->rb_node);
2990 node->level = level;
2991 node->bytenr = bytenr;
2992 cache->nr_nodes++;
2994 return node;
2995 }
2996 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2997 struct btrfs_backref_cache *cache)
2999 struct btrfs_backref_edge *edge;
3001 edge = kzalloc(sizeof(*edge), GFP_NOFS);
3002 if (edge)
3003 cache->nr_edges++;
3004 return edge;
3005 }
3008 * Drop the backref node from cache, also cleaning up all its
3009 * upper edges and any uncached nodes in the path.
3011 * This cleanup happens bottom up, thus the node should either
3012 * be the lowest node in the cache or a detached node.
3014 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3015 struct btrfs_backref_node *node)
3017 struct btrfs_backref_node *upper;
3018 struct btrfs_backref_edge *edge;
3020 if (!node)
3021 return;
3023 BUG_ON(!node->lowest && !node->detached);
3024 while (!list_empty(&node->upper)) {
3025 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3027 upper = edge->node[UPPER];
3028 list_del(&edge->list[LOWER]);
3029 list_del(&edge->list[UPPER]);
3030 btrfs_backref_free_edge(cache, edge);
3033 * Add the node to the leaf node list if no other child block
3034 * is cached.
3035 */
3036 if (list_empty(&upper->lower)) {
3037 list_add_tail(&upper->lower, &cache->leaves);
3038 upper->lowest = 1;
3039 }
3040 }
3042 btrfs_backref_drop_node(cache, node);
3046 * Release all nodes/edges from current cache
3048 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3049 {
3050 struct btrfs_backref_node *node;
3051 int i;
3053 while (!list_empty(&cache->detached)) {
3054 node = list_entry(cache->detached.next,
3055 struct btrfs_backref_node, list);
3056 btrfs_backref_cleanup_node(cache, node);
3059 while (!list_empty(&cache->leaves)) {
3060 node = list_entry(cache->leaves.next,
3061 struct btrfs_backref_node, lower);
3062 btrfs_backref_cleanup_node(cache, node);
3065 cache->last_trans = 0;
3067 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3068 ASSERT(list_empty(&cache->pending[i]));
3069 ASSERT(list_empty(&cache->pending_edge));
3070 ASSERT(list_empty(&cache->useless_node));
3071 ASSERT(list_empty(&cache->changed));
3072 ASSERT(list_empty(&cache->detached));
3073 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3074 ASSERT(!cache->nr_nodes);
3075 ASSERT(!cache->nr_edges);
3076 }
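/*
 * Editor's illustrative sketch, not in the original file: a backref cache is
 * normally embedded in a longer-lived structure (e.g. the relocation
 * control), initialized once and torn down with
 * btrfs_backref_release_cache(), which asserts emptiness as seen above.
 */
static void __maybe_unused demo_cache_lifetime(struct btrfs_fs_info *fs_info)
{
	struct btrfs_backref_cache cache;

	btrfs_backref_init_cache(fs_info, &cache, 0 /* not reloc */);
	/* ... build and consume backref nodes/edges here ... */
	btrfs_backref_release_cache(&cache);
}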
3079 * Handle direct tree backref
3081 * A direct tree backref means the backref item records its parent bytenr
3082 * directly. This is for a SHARED_BLOCK_REF backref (keyed or inlined).
3084 * @ref_key: The converted backref key.
3085 * For keyed backref, it's the item key.
3086 * For inlined backref, objectid is the bytenr,
3087 * type is btrfs_inline_ref_type, offset is
3088 * btrfs_inline_ref_offset.
3090 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3091 struct btrfs_key *ref_key,
3092 struct btrfs_backref_node *cur)
3094 struct btrfs_backref_edge *edge;
3095 struct btrfs_backref_node *upper;
3096 struct rb_node *rb_node;
3098 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3100 /* Only reloc root uses backref pointing to itself */
3101 if (ref_key->objectid == ref_key->offset) {
3102 struct btrfs_root *root;
3104 cur->is_reloc_root = 1;
3105 /* Only reloc backref cache cares about a specific root */
3106 if (cache->is_reloc) {
3107 root = find_reloc_root(cache->fs_info, cur->bytenr);
3108 if (!root)
3109 return -ENOENT;
3110 cur->root = root;
3111 } else {
3112 /*
3113 * For generic purpose backref cache, reloc root node
3114 * is useless.
3115 */
3116 list_add(&cur->list, &cache->useless_node);
3117 }
3118 return 0;
3119 }
3121 edge = btrfs_backref_alloc_edge(cache);
3122 if (!edge)
3123 return -ENOMEM;
3125 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3127 /* Parent node not yet cached */
3128 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3129 cur->level + 1);
3130 if (!upper) {
3131 btrfs_backref_free_edge(cache, edge);
3132 return -ENOMEM;
3133 }
3136 * Backrefs for the upper level block aren't cached, add the
3137 * block to the pending list
3139 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3141 /* Parent node already cached */
3142 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3143 ASSERT(upper->checked);
3144 INIT_LIST_HEAD(&edge->list[UPPER]);
3146 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3148 return 0;
3149 }
3151 * Handle indirect tree backref
3153 * An indirect tree backref means we only know which tree the node belongs
3154 * to. We still need to do a tree search to find out the parents. This is for
3155 * TREE_BLOCK_REF backref (keyed or inlined).
3157 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
3158 * @tree_key: The first key of this tree block.
3159 * @path: A clean (released) path, to avoid allocating a path every time
3160 * the function gets called.
3162 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
3163 struct btrfs_path *path,
3164 struct btrfs_key *ref_key,
3165 struct btrfs_key *tree_key,
3166 struct btrfs_backref_node *cur)
3168 struct btrfs_fs_info *fs_info = cache->fs_info;
3169 struct btrfs_backref_node *upper;
3170 struct btrfs_backref_node *lower;
3171 struct btrfs_backref_edge *edge;
3172 struct extent_buffer *eb;
3173 struct btrfs_root *root;
3174 struct rb_node *rb_node;
3175 int level;
3176 bool need_check = true;
3177 int ret;
3179 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3180 if (IS_ERR(root))
3181 return PTR_ERR(root);
3182 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3183 cur->cowonly = 1;
3185 if (btrfs_root_level(&root->root_item) == cur->level) {
3187 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3189 * For reloc backref cache, we may ignore reloc root. But for
3190 * general purpose backref cache, we can't rely on
3191 * btrfs_should_ignore_reloc_root() as it may conflict with
3192 * current running relocation and lead to missing root.
3194 * For general purpose backref cache, reloc root detection is
3195 * completely relying on direct backref (key->offset is parent
3196 * bytenr), thus only do such check for reloc cache.
3198 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3199 btrfs_put_root(root);
3200 list_add(&cur->list, &cache->useless_node);
3201 } else {
3202 cur->root = root;
3203 }
3204 return 0;
3205 }
3207 level = cur->level + 1;
3209 /* Search the tree to find parent blocks referring to the block */
3210 path->search_commit_root = 1;
3211 path->skip_locking = 1;
3212 path->lowest_level = level;
3213 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3214 path->lowest_level = 0;
3215 if (ret < 0) {
3216 btrfs_put_root(root);
3217 return ret;
3218 }
3219 if (ret > 0 && path->slots[level] > 0)
3220 path->slots[level]--;
3222 eb = path->nodes[level];
3223 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3225 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3226 cur->bytenr, level - 1, root->root_key.objectid,
3227 tree_key->objectid, tree_key->type, tree_key->offset);
3228 btrfs_put_root(root);
3229 ret = -ENOENT;
3230 goto out;
3231 }
3234 /* Add all nodes and edges in the path */
3235 for (; level < BTRFS_MAX_LEVEL; level++) {
3236 if (!path->nodes[level]) {
3237 ASSERT(btrfs_root_bytenr(&root->root_item) ==
3238 lower->bytenr);
3239 /* Same as previous should_ignore_reloc_root() call */
3240 if (btrfs_should_ignore_reloc_root(root) &&
3241 cache->is_reloc) {
3242 btrfs_put_root(root);
3243 list_add(&lower->list, &cache->useless_node);
3244 } else {
3245 lower->root = root;
3246 }
3247 break;
3248 }
3250 edge = btrfs_backref_alloc_edge(cache);
3251 if (!edge) {
3252 btrfs_put_root(root);
3253 ret = -ENOMEM;
3254 goto out;
3255 }
3257 eb = path->nodes[level];
3258 rb_node = rb_simple_search(&cache->rb_root, eb->start);
3260 upper = btrfs_backref_alloc_node(cache, eb->start,
3261 lower->level + 1);
3262 if (!upper) {
3263 btrfs_put_root(root);
3264 btrfs_backref_free_edge(cache, edge);
3265 ret = -ENOMEM;
3266 goto out;
3267 }
3268 upper->owner = btrfs_header_owner(eb);
3269 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3270 upper->cowonly = 1;
3273 * If we know the block isn't shared we can avoid
3274 * checking its backrefs.
3276 if (btrfs_block_can_be_shared(root, eb))
3277 upper->checked = 0;
3278 else
3279 upper->checked = 1;
3282 * Add the block to pending list if we need to check its
3283 * backrefs, we only do this once while walking up a
3284 * tree as we will catch anything else later on.
3286 if (!upper->checked && need_check) {
3287 need_check = false;
3288 list_add_tail(&edge->list[UPPER],
3289 &cache->pending_edge);
3290 } else {
3291 if (upper->checked)
3292 need_check = true;
3293 INIT_LIST_HEAD(&edge->list[UPPER]);
3294 }
3295 } else {
3296 upper = rb_entry(rb_node, struct btrfs_backref_node,
3297 rb_node);
3298 ASSERT(upper->checked);
3299 INIT_LIST_HEAD(&edge->list[UPPER]);
3301 upper->owner = btrfs_header_owner(eb);
3302 }
3303 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3306 btrfs_put_root(root);
3312 out:
3313 btrfs_release_path(path);
3314 return ret;
3315 }
3318 * Add backref node @cur into @cache.
3320 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3321 * links aren't yet bi-directional; use btrfs_backref_finish_upper_links()
3322 * to finish the linkage.
3324 * @path: Released path for indirect tree backref lookup
3325 * @iter: Released backref iter for extent tree search
3326 * @node_key: The first key of the tree block
3328 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
3329 struct btrfs_path *path,
3330 struct btrfs_backref_iter *iter,
3331 struct btrfs_key *node_key,
3332 struct btrfs_backref_node *cur)
3334 struct btrfs_fs_info *fs_info = cache->fs_info;
3335 struct btrfs_backref_edge *edge;
3336 struct btrfs_backref_node *exist;
3337 int ret;
3339 ret = btrfs_backref_iter_start(iter, cur->bytenr);
3340 if (ret < 0)
3341 return ret;
3343 * We skip the first btrfs_tree_block_info, as we don't use the key
3344 * stored in it, but fetch it from the tree block
3346 if (btrfs_backref_has_tree_block_info(iter)) {
3347 ret = btrfs_backref_iter_next(iter);
3348 if (ret < 0)
3349 goto out;
3350 /* No extra backref? This means the tree block is corrupted */
3351 if (ret > 0) {
3352 ret = -EUCLEAN;
3353 goto out;
3354 }
3355 }
3356 WARN_ON(cur->checked);
3357 if (!list_empty(&cur->upper)) {
3359 * The backref was added previously when processing backref of
3360 * type BTRFS_TREE_BLOCK_REF_KEY
3362 ASSERT(list_is_singular(&cur->upper));
3363 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3365 ASSERT(list_empty(&edge->list[UPPER]));
3366 exist = edge->node[UPPER];
3368 * Add the upper level block to the pending list if we need to
3369 * check its backrefs.
3370 */
3371 if (!exist->checked)
3372 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3373 } else {
3374 exist = NULL;
3375 }
3377 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3378 struct extent_buffer *eb;
3379 struct btrfs_key key;
3380 int type;
3383 eb = btrfs_backref_get_eb(iter);
3385 key.objectid = iter->bytenr;
3386 if (btrfs_backref_iter_is_inline_ref(iter)) {
3387 struct btrfs_extent_inline_ref *iref;
3389 /* Update key for inline backref */
3390 iref = (struct btrfs_extent_inline_ref *)
3391 ((unsigned long)iter->cur_ptr);
3392 type = btrfs_get_extent_inline_ref_type(eb, iref,
3393 BTRFS_REF_TYPE_BLOCK);
3394 if (type == BTRFS_REF_TYPE_INVALID) {
3395 ret = -EUCLEAN;
3396 goto out;
3397 }
3398 key.type = type;
3399 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3400 } else {
3401 key.type = iter->cur_key.type;
3402 key.offset = iter->cur_key.offset;
3403 }
3406 * Parent node found and matches current inline ref, no need to
3407 * rebuild this node for this inline ref
3409 if (exist &&
3410 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3411 exist->owner == key.offset) ||
3412 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3413 exist->bytenr == key.offset))) {
3414 exist = NULL;
3415 continue;
3416 }
3418 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3419 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3420 ret = handle_direct_tree_backref(cache, &key, cur);
3421 if (ret < 0)
3422 goto out;
3423 continue;
3424 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3425 ret = -EINVAL;
3426 btrfs_print_v0_err(fs_info);
3427 btrfs_handle_fs_error(fs_info, ret, NULL);
3428 goto out;
3429 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3430 continue;
3431 }
3434 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
3435 * means the root objectid. We need to search the tree to get
3436 * its parent bytenr.
3438 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3439 cur);
3440 if (ret < 0)
3441 goto out;
3442 }
3443 ret = 0;
3444 cur->checked = 1;
3445 WARN_ON(exist);
3446 out:
3447 btrfs_backref_iter_release(iter);
3448 return ret;
3449 }
3452 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3454 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3455 struct btrfs_backref_node *start)
3457 struct list_head *useless_node = &cache->useless_node;
3458 struct btrfs_backref_edge *edge;
3459 struct rb_node *rb_node;
3460 LIST_HEAD(pending_edge);
3462 ASSERT(start->checked);
3464 /* Insert this node to cache if it's not COW-only */
3465 if (!start->cowonly) {
3466 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3467 &start->rb_node);
3468 if (rb_node)
3469 btrfs_backref_panic(cache->fs_info, start->bytenr,
3470 -EEXIST);
3471 list_add_tail(&start->lower, &cache->leaves);
3472 }
3475 * Use breadth first search to iterate all related edges.
3477 * The starting points are all the edges of this node
3479 list_for_each_entry(edge, &start->upper, list[LOWER])
3480 list_add_tail(&edge->list[UPPER], &pending_edge);
3482 while (!list_empty(&pending_edge)) {
3483 struct btrfs_backref_node *upper;
3484 struct btrfs_backref_node *lower;
3486 edge = list_first_entry(&pending_edge,
3487 struct btrfs_backref_edge, list[UPPER]);
3488 list_del_init(&edge->list[UPPER]);
3489 upper = edge->node[UPPER];
3490 lower = edge->node[LOWER];
3492 /* Parent is detached, no need to keep any edges */
3493 if (upper->detached) {
3494 list_del(&edge->list[LOWER]);
3495 btrfs_backref_free_edge(cache, edge);
3497 /* Lower node is orphan, queue for cleanup */
3498 if (list_empty(&lower->upper))
3499 list_add(&lower->list, useless_node);
3500 continue;
3501 }
3504 * All new nodes added in current build_backref_tree() haven't
3505 * been linked to the cache rb tree.
3506 * So if we have upper->rb_node populated, this means a cache
3507 * hit. We only need to link the edge, as @upper and all its
3508 * parents have already been linked.
3510 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3511 if (upper->lowest) {
3512 list_del_init(&upper->lower);
3513 upper->lowest = 0;
3514 }
3516 list_add_tail(&edge->list[UPPER], &upper->lower);
3517 continue;
3518 }
3520 /* Sanity check, we shouldn't have any unchecked nodes */
3521 if (!upper->checked) {
3522 ASSERT(0);
3523 return -EUCLEAN;
3524 }
3526 /* Sanity check, COW-only node has non-COW-only parent */
3527 if (start->cowonly != upper->cowonly) {
3528 ASSERT(0);
3529 return -EUCLEAN;
3530 }
3532 /* Only cache non-COW-only (subvolume trees) tree blocks */
3533 if (!upper->cowonly) {
3534 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3535 &upper->rb_node);
3536 if (rb_node) {
3537 btrfs_backref_panic(cache->fs_info,
3538 upper->bytenr, -EEXIST);
3539 return -EUCLEAN;
3540 }
3541 }
3543 list_add_tail(&edge->list[UPPER], &upper->lower);
3546 * Also queue all the parent edges of this uncached node
3547 * to finish the upper linkage
3549 list_for_each_entry(edge, &upper->upper, list[LOWER])
3550 list_add_tail(&edge->list[UPPER], &pending_edge);
3551 }
3553 return 0;
3554 }
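/*
 * Editor's illustrative sketch, not from the original source: the two-phase
 * protocol described above -- btrfs_backref_add_tree_node() first, then
 * btrfs_backref_finish_upper_links(), with btrfs_backref_error_cleanup() on
 * failure.  Simplified: the real caller (build_backref_tree() in
 * relocation.c) also drains cache->pending_edge between the two phases.
 */
static struct btrfs_backref_node * __maybe_unused
demo_build_backref_node(struct btrfs_backref_cache *cache,
			struct btrfs_path *path,
			struct btrfs_backref_iter *iter,
			struct btrfs_key *node_key,
			u64 bytenr, int level)
{
	struct btrfs_backref_node *node;
	int ret;

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->lowest = 1;
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
	if (ret < 0)
		goto error;

	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	return node;
error:
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}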
3555 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3556 struct btrfs_backref_node *node)
3558 struct btrfs_backref_node *lower;
3559 struct btrfs_backref_node *upper;
3560 struct btrfs_backref_edge *edge;
3562 while (!list_empty(&cache->useless_node)) {
3563 lower = list_first_entry(&cache->useless_node,
3564 struct btrfs_backref_node, list);
3565 list_del_init(&lower->list);
3566 }
3567 while (!list_empty(&cache->pending_edge)) {
3568 edge = list_first_entry(&cache->pending_edge,
3569 struct btrfs_backref_edge, list[UPPER]);
3570 list_del(&edge->list[UPPER]);
3571 list_del(&edge->list[LOWER]);
3572 lower = edge->node[LOWER];
3573 upper = edge->node[UPPER];
3574 btrfs_backref_free_edge(cache, edge);
3577 * Lower is no longer linked to any upper backref nodes and
3578 * isn't in the cache, we can free it ourselves.
3580 if (list_empty(&lower->upper) &&
3581 RB_EMPTY_NODE(&lower->rb_node))
3582 list_add(&lower->list, &cache->useless_node);
3584 if (!RB_EMPTY_NODE(&upper->rb_node))
3585 continue;
3587 /* Add this node's upper edges to the list to process */
3588 list_for_each_entry(edge, &upper->upper, list[LOWER])
3589 list_add_tail(&edge->list[UPPER],
3590 &cache->pending_edge);
3591 if (list_empty(&upper->upper))
3592 list_add(&upper->list, &cache->useless_node);
3593 }
3595 while (!list_empty(&cache->useless_node)) {
3596 lower = list_first_entry(&cache->useless_node,
3597 struct btrfs_backref_node, list);
3598 list_del_init(&lower->list);
3599 if (lower == node)
3600 node = NULL;
3601 btrfs_backref_drop_node(cache, lower);
3603 if (node)
3604 btrfs_backref_cleanup_node(cache, node);
3605 ASSERT(list_empty(&cache->useless_node) &&
3606 list_empty(&cache->pending_edge));
3607 }